chore: sync testing and tools dir with sdk master (#453)
zmalik authored and rishabh96b committed Oct 22, 2019
1 parent aca7948 commit 448ffac
Showing 11 changed files with 292 additions and 181 deletions.
2 changes: 1 addition & 1 deletion testing/sdk_plan.py
@@ -205,7 +205,7 @@ def wait_for_plan_status(
     else:
         statuses = status
 
-    initial_failures = sdk_tasks.get_failed_task_count(service_name, retry=True)
+    initial_failures = sdk_tasks.get_failed_task_count(service_name)
     wait_start = datetime.datetime.utcnow()
 
     @retrying.retry(
2 changes: 2 additions & 0 deletions testing/sdk_security.py
@@ -188,6 +188,8 @@ def create_service_account(service_account_name: str, service_account_secret: st
             account=service_account_name, secret=service_account_secret
         )
     )
+    if service_account_secret == service_account_name:
+        log.warning("Values for service_account_name and service_account_secret are the same.")
 
     log.info("Remove any existing service account and/or secret")
     delete_service_account(service_account_name, service_account_secret)
47 changes: 27 additions & 20 deletions testing/sdk_tasks.py
@@ -83,7 +83,7 @@ class Task(object):
     """Entry value returned by get_summary() and get_service_tasks()"""
 
     @staticmethod
-    def parse(task_entry: Dict[str, Any], agentid_to_hostname: Dict[str, str]) -> 'Task':
+    def parse(task_entry: Dict[str, Any], agentid_to_hostname: Dict[str, str]) -> "Task":
         agent_id = task_entry["slave_id"]
         matching_hostname = agentid_to_hostname.get(agent_id)
         if matching_hostname:
@@ -155,19 +155,14 @@ def get_all_status_history(task_name: str, with_completed_tasks: bool = True) ->
     return history
 
 
-def get_failed_task_count(service_name: str, retry: bool = False) -> int:
-    history_response = sdk_cmd.cluster_request(
-        "GET", "/dcos-history-service/history/last", retry=retry
+def get_failed_task_count(service_name: str) -> int:
+    return len(
+        [
+            t
+            for t in get_service_tasks(service_name, with_completed_tasks=True)
+            if t.state in FATAL_TERMINAL_TASK_STATES
+        ]
     )
-    history_response.raise_for_status()
-    history = history_response.json()
-    service_history = [h for h in history["frameworks"] if h.get("name") == service_name]
-    if not service_history:
-        return 0
-
-    assert len(service_history) == 1
-
-    return sum(service_history[0].get(status, 0) for status in FATAL_TERMINAL_TASK_STATES)
 
 
 def check_task_count(service_name: str, expected_task_count: int) -> list:
@@ -195,7 +190,9 @@ def get_task_ids(service_name: str, task_prefix: str = "") -> List[str]:
     return [t.id for t in get_service_tasks(service_name, task_prefix=task_prefix)]
 
 
-def get_service_tasks(service_name: str, task_prefix: str = "", with_completed_tasks: bool = False) -> list:
+def get_service_tasks(
+    service_name: str, task_prefix: str = "", with_completed_tasks: bool = False
+) -> list:
     """Returns a list of task objects for tasks in the specified Mesos framework.
 
     : param service_name: The name of the Mesos framework whose task information should be retrieved.
@@ -351,7 +348,7 @@ def _check_task_relaunched() -> None:
 
 
 def check_scheduler_relaunched(
-    service_name: str, old_scheduler_task_id: str, timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS,
+    service_name: str, old_scheduler_task_id: str, timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS
 ) -> None:
     """
     This function checks for the relaunch of a task using the same matching as is
@@ -464,7 +461,9 @@ def check_tasks_not_updated(service_name: str, prefix: str, old_task_ids: Iterab
     ), 'Tasks starting with "{}" were updated:{}'.format(prefix, task_sets)
 
 
-def wait_for_active_framework(service_name: str, timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS) -> None:
+def wait_for_active_framework(
+    service_name: str, timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS
+) -> None:
     """
     Waits until a framework with name `framework_name` is found and is active
     """
@@ -474,8 +473,16 @@ def wait_for_active_framework(service_name: str, timeout_seconds: int = DEFAULT_
         wait_fixed=1000, stop_max_delay=timeout_seconds * 1000, retry_on_result=lambda res: not res
     )
     def _wait_for_active_framework() -> bool:
-        return len(list(filter(
-            lambda fwk: fwk["name"] == service_name and fwk["active"],
-            sdk_cmd.cluster_request("GET", "/mesos/frameworks").json()["frameworks"]
-        ))) > 0
+        return (
+            len(
+                list(
+                    filter(
+                        lambda fwk: fwk["name"] == service_name and fwk["active"],
+                        sdk_cmd.cluster_request("GET", "/mesos/frameworks").json()["frameworks"],
+                    )
+                )
+            )
+            > 0
+        )
 
     _wait_for_active_framework()
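
Note on the get_failed_task_count rewrite above: failures are now counted from the framework's own completed-task list instead of the dcos-history-service endpoint. A minimal, self-contained Python sketch of that counting logic, using a stand-in Task type and assumed example values for FATAL_TERMINAL_TASK_STATES (the real constant and task objects live in testing/sdk_tasks.py):

from dataclasses import dataclass
from typing import List

# Stand-ins for the real definitions in testing/sdk_tasks.py; the exact state
# names in FATAL_TERMINAL_TASK_STATES are assumed here for illustration.
FATAL_TERMINAL_TASK_STATES = {"TASK_FAILED", "TASK_ERROR"}


@dataclass
class Task:
    name: str
    state: str


def count_failed_tasks(tasks: List[Task]) -> int:
    # Same shape as the new get_failed_task_count(): keep only tasks whose
    # terminal state is considered fatal, then count them.
    return len([t for t in tasks if t.state in FATAL_TERMINAL_TASK_STATES])


if __name__ == "__main__":
    tasks = [Task("node-0", "TASK_RUNNING"), Task("node-1", "TASK_FAILED")]
    print(count_failed_tasks(tasks))  # -> 1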
10 changes: 3 additions & 7 deletions testing/security/transport_encryption.py
@@ -14,8 +14,7 @@
 
 
 def setup_service_account(
-    service_name: str,
-    service_account_secret: Optional[str] = None,
+    service_name: str, service_account_secret: Optional[str] = None
 ) -> Dict[str, Any]:
     """
     Setup the service account for TLS. If the account or secret of the specified
@@ -26,7 +25,7 @@ def setup_service_account(
         log.error("The setup of a service account requires DC/OS EE. service_name=%s", service_name)
         raise Exception("The setup of a service account requires DC/OS EE")
 
-    secret = service_name if service_account_secret is None else service_account_secret
+    secret = service_name + "-secret" if service_account_secret is None else service_account_secret
 
     service_account = "{}-service-account".format(service_name.replace("/", ""))
 
@@ -69,10 +68,7 @@ def setup_service_account(
     return service_account_info
 
 
-def cleanup_service_account(
-    service_name: str,
-    service_account_info: Dict[str, Any],
-) -> None:
+def cleanup_service_account(service_name: str, service_account_info: Dict[str, Any]) -> None:
     """
     Clean up the specified service account.
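
For reference, the setup_service_account change above only affects the default secret name: when no service_account_secret is passed, the secret now becomes "<service_name>-secret" instead of the bare service name, while the service-account name continues to strip slashes. A small illustrative sketch of that naming, using an assumed example service name not taken from the diff:

from typing import Optional


def derive_names(service_name: str, service_account_secret: Optional[str] = None) -> dict:
    # Mirrors the naming shown in the diff: the default secret gains a
    # "-secret" suffix; the service-account name drops any "/" in the name.
    secret = service_name + "-secret" if service_account_secret is None else service_account_secret
    service_account = "{}-service-account".format(service_name.replace("/", ""))
    return {"secret": secret, "service_account": service_account}


print(derive_names("data-services/kafka"))
# {'secret': 'data-services/kafka-secret', 'service_account': 'data-serviceskafka-service-account'}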
22 changes: 18 additions & 4 deletions tools/ci/launch_cluster.sh
@@ -7,15 +7,24 @@ set -e
 LAUNCH_SUCCESS="False"
 RETRY_LAUNCH="True"
 
+env
+
 while [ x"${LAUNCH_SUCCESS}" == x"False" ]; do
   rm -f ${CLUSTER_INFO_FILE} # dcos-launch complains if the file already exists
-  dcos-launch create --config-path=${LAUNCH_CONFIG_FILE} --info-path=${CLUSTER_INFO_FILE}
+
+  # The first parameter to wrap.sh is the name of the virtual environment the
+  # command should run in. The rest of the parameters is the command itself.
+  /venvs/wrap.sh dcos-launch dcos-launch create --config-path=${LAUNCH_CONFIG_FILE} --info-path=${CLUSTER_INFO_FILE}
+
   if [ x"$RETRY_LAUNCH" == x"True" ]; then
     set +e
   else
     set -e
   fi
-  dcos-launch wait --info-path=${CLUSTER_INFO_FILE} 2>&1 | tee dcos-launch-wait-output.stdout
+
+  # The first parameter to wrap.sh is the name of the virtual environment the
+  # command should run in. The rest of the parameters is the command itself.
+  /venvs/wrap.sh dcos-launch dcos-launch wait --info-path=${CLUSTER_INFO_FILE} 2>&1 | tee dcos-launch-wait-output.stdout
 
   # Grep exits with an exit code of 1 if no lines are matched. We thus need to
   # disable exit on errors.
@@ -35,8 +44,13 @@ while [ x"${LAUNCH_SUCCESS}" == x"False" ]; do
     RETRY_LAUNCH="False"
     set -e
 
-    # We need to wait for the current stack to be deleted
-    dcos-launch delete --info-path=${CLUSTER_INFO_FILE}
+
+    # We need to wait for the current stack to be deleted.
+    #
+    # The first parameter to wrap.sh is the name of the virtual environment
+    # the command should run in. The rest of the parameters is the command
+    # itself.
+    /venvs/wrap.sh dcos-launch dcos-launch delete --info-path=${CLUSTER_INFO_FILE}
     rm -f ${CLUSTER_INFO_FILE}
     echo "Cluster creation failed. Retrying after 30 seconds"
     sleep 30
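
The launch_cluster.sh changes above route every dcos-launch invocation through /venvs/wrap.sh, whose first argument names the virtual environment and whose remaining arguments are the command to run. wrap.sh itself is not part of this diff, so as a purely hypothetical illustration of that contract, a Python equivalent might put the named venv's bin directory first on PATH before running the command (the /venvs/<name>/bin layout below is an assumption):

import os
import subprocess
import sys


def run_in_venv(venv_name: str, command: list) -> int:
    # Prepend the venv's bin directory to PATH so the command resolves to the
    # venv's interpreter and console scripts, then run it unchanged.
    venv_bin = os.path.join("/venvs", venv_name, "bin")
    env = dict(os.environ, PATH=venv_bin + os.pathsep + os.environ.get("PATH", ""))
    return subprocess.call(command, env=env)


if __name__ == "__main__":
    # Usage mirrors wrap.sh: first the venv name, then the command itself.
    sys.exit(run_in_venv(sys.argv[1], sys.argv[2:]))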
37 changes: 37 additions & 0 deletions tools/ci/steps/check_json_files.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+
+import collections
+import difflib
+import os.path
+import json
+import os
+import fnmatch
+import sys
+
+
+framework_dir = os.getcwd() + "/"
+path_list = list()
+for path, subdirs, files in os.walk(framework_dir):
+    for name in files:
+        filtered_path = fnmatch.filter(
+            [os.path.join(path, name)], "*" + sys.argv[1] + "*universe*.json"
+        )
+        if len(filtered_path) > 0:
+            path_list.extend(filtered_path)
+
+for path in path_list:
+    with open(path, "r") as source:
+        raw_data = [l.rstrip("\n") for l in source.readlines()]
+        formatted_data = [
+            l
+            for l in json.dumps(
+                json.loads("".join(raw_data), object_pairs_hook=collections.OrderedDict), indent=2
+            ).split("\n")
+        ]
+        diff = list(
+            difflib.unified_diff(raw_data, formatted_data, fromfile=path, tofile="formatted")
+        )
+        if diff:
+            print("\n" + ("\n".join(diff)))
+            print("{} is not formatted correctly, see diff above".format(path))
+            exit(1)
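
The new check_json_files.py above flags any matching *universe*.json file whose on-disk formatting differs from json.dumps(..., indent=2) with key order preserved. A minimal sketch of that round-trip check on an in-memory string (hypothetical input, same idea):

import collections
import difflib
import json

raw = '{"name": "hello-world",\n    "version":   "1.0"}'
raw_lines = raw.split("\n")

# Re-serialize with 2-space indentation, preserving key order, then diff
# against the original lines; any diff output means the input is unformatted.
formatted_lines = json.dumps(
    json.loads(raw, object_pairs_hook=collections.OrderedDict), indent=2
).split("\n")

diff = list(difflib.unified_diff(raw_lines, formatted_lines, fromfile="raw", tofile="formatted"))
if diff:
    print("\n".join(diff))
    print("input is not formatted correctly, see diff above")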
37 changes: 33 additions & 4 deletions tools/ci/test_runner.sh
@@ -10,7 +10,7 @@ export PACKAGE_REGISTRY_ENABLED
 export PACKAGE_REGISTRY_STUB_URL
 export DCOS_FILES_PATH
 
-BUILD_TOOL_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+BUILD_TOOL_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 REPO_ROOT_DIR="${REPO_ROOT:-$1}"
 
 SINGLE_FRAMEWORK="True"
@@ -66,15 +66,44 @@ else
 fi
 
 
+function get_public_master_url()
+{
+    local cluster_description_file="$(mktemp)"
+    local attempts=5
+    local master_ip
+    # We retry, since sometimes the cluster is created, but dcos-launch has intermittent problems describing it.
+    for attempt in $(seq 1 ${attempts}); do
+        # Careful to not use a pipeline!
+        #
+        # The first parameter to wrap.sh is the name of the virtual environment
+        # the command should run in. The rest of the parameters is the command
+        # itself.
+        if /venvs/wrap.sh dcos-launch dcos-launch describe --info-path="${REPO_ROOT_DIR}/cluster_info.json" > "${cluster_description_file}" &&
+           master_ip=$(jq --raw-output --exit-status '.masters[0].public_ip' < "${cluster_description_file}")
+        then
+            echo "https://${master_ip}"
+            return 0
+        else
+            echo "parsing output of [dcos-launch describe] failed (attempt ${attempt})" >&2
+            local delay_sec=$((attempt*10))
+            if [[ ${attempt} -lt ${attempts} ]]; then
+                echo "retrying in ${delay_sec} seconds..." >&2
+                sleep ${delay_sec}
+            fi
+        fi
+    done
+    return 1
+}
+
 # Now create a cluster if it doesn't exist.
 if [ -z "$CLUSTER_URL" ]; then
     echo "No DC/OS cluster specified. Attempting to create one now"
 
     ${BUILD_TOOL_DIR}/launch_cluster.sh ${REPO_ROOT_DIR}/config.yaml ${REPO_ROOT_DIR}/cluster_info.json
 
     if [ -f ${REPO_ROOT_DIR}/cluster_info.json ]; then
-        export CLUSTER_URL=https://$(dcos-launch describe --info-path=${REPO_ROOT_DIR}/cluster_info.json | jq -r .masters[0].public_ip)
-        if [ -z $CLUSTER_URL ]; then
+        export CLUSTER_URL=$(get_public_master_url)
+        if [ -z "${CLUSTER_URL}" ]; then
             echo "Could not determine CLUSTER_URL"
             exit 1
         fi
@@ -173,7 +202,7 @@ echo "Finished integration tests at "`date`
 
 if [ -n "$CLUSTER_WAS_CREATED" ]; then
     echo "The DC/OS cluster $CLUSTER_URL was created. Please run"
-    echo "\t\$ dcos-launch delete --info-path=${CLUSTER_INFO_FILE}"
+    echo "\t\$ /venvs/wrap.sh dcos-launch dcos-launch delete --info-path=${CLUSTER_INFO_FILE}"
     echo "to remove the cluster."
 fi
 
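
The get_public_master_url helper added above retries the dcos-launch describe step up to five times, backing off by attempt * 10 seconds, and only emits a URL once jq extracts a public master IP. A rough Python sketch of the same retry-and-backoff shape, where describe_cluster is a hypothetical stand-in for the dcos-launch/jq step:

import time
from typing import Callable, Optional


def get_public_master_url(
    describe_cluster: Callable[[], Optional[str]], attempts: int = 5
) -> Optional[str]:
    # Retry the describe step with a linearly growing delay (10s, 20s, ...),
    # returning as soon as a master IP is obtained.
    for attempt in range(1, attempts + 1):
        master_ip = describe_cluster()
        if master_ip:
            return "https://{}".format(master_ip)
        print("describe failed (attempt {})".format(attempt))
        if attempt < attempts:
            delay = attempt * 10
            print("retrying in {} seconds...".format(delay))
            time.sleep(delay)
    return None


print(get_public_master_url(lambda: "203.0.113.10", attempts=1))  # -> https://203.0.113.10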