Skip to content

Commit

Permalink
Merge pull request projectcalico#119 from caseydavenport/some-more-sts
Browse files Browse the repository at this point in the history
Add test for many services, endpoint scaling
  • Loading branch information
caseydavenport authored Dec 5, 2018
2 parents d3df193 + 3349ec3 commit 2e6495a
Show file tree
Hide file tree
Showing 3 changed files with 63 additions and 16 deletions.
22 changes: 13 additions & 9 deletions tests/k8st/test_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,15 +35,13 @@ class TestBase(TestCase):
"""
Base class for test-wide methods.
"""
@classmethod
def setUpClass(cls):
    # Runs once for the whole test class: make sure the cluster's
    # calico/node daemonset is on the expected image before any test runs.
    cls.check_calico_version()

def setUp(self):
"""
Clean up before every test.
"""
self.cluster = self.k8s_client()
self.check_calico_version()

# Log a newline to ensure that the first log appears on its own line.
logger.info("")
Expand Down Expand Up @@ -109,7 +107,11 @@ def check_pod_status(self, ns):
def create_namespace(self, ns_name):
    """Create a Kubernetes namespace called *ns_name* via the API client."""
    metadata = client.V1ObjectMeta(name=ns_name)
    namespace = client.V1Namespace(metadata=metadata)
    self.cluster.create_namespace(namespace)

def create_service(self, image, name, ns, port, replicas=1, svc_type="NodePort", traffic_policy="Local", cluster_ip=None):
def deploy(self, image, name, ns, port, replicas=1, svc_type="NodePort", traffic_policy="Local", cluster_ip=None):
"""
Creates a deployment and corresponding service with the given
parameters.
"""
# Run a deployment with <replicas> copies of <image>, with the
# pods labelled with "app": <name>.
deployment = client.ExtensionsV1beta1Deployment(
Expand All @@ -132,14 +134,17 @@ def create_service(self, image, name, ns, port, replicas=1, svc_type="NodePort",

# Create a service called <name> whose endpoints are the pods
# with "app": <name>; i.e. those just created above.
self.create_service(name, name, ns, port, svc_type, traffic_policy)

def create_service(self, name, app, ns, port, svc_type="NodePort", traffic_policy="Local", cluster_ip=None):
service = client.V1Service(
metadata=client.V1ObjectMeta(
name=name,
labels={"name": name},
),
spec={
"ports": [{"port": port}],
"selector": {"app": name},
"selector": {"app": app},
"type": svc_type,
"externalTrafficPolicy": traffic_policy,
}
Expand All @@ -150,10 +155,9 @@ def create_service(self, image, name, ns, port, replicas=1, svc_type="NodePort",
body=service,
namespace=ns,
)
logger.debug("Service created. status='%s'" % str(api_response.status))
logger.debug("Additional Service created. status='%s'" % str(api_response.status))

@classmethod
def check_calico_version(cls):
def check_calico_version(self):
config.load_kube_config(os.environ.get('KUBECONFIG'))
api = client.AppsV1Api(client.ApiClient())
node_ds = api.read_namespaced_daemon_set("calico-node", "kube-system", exact=True, export=True)
Expand All @@ -163,7 +167,7 @@ def check_calico_version(cls):
container.image = "calico/node:latest-amd64"
api.replace_namespaced_daemon_set("calico-node", "kube-system", node_ds)
time.sleep(3)
retry_until_success(cls.check_pod_status, retries=20, wait_time=3, function_args=["kube-system"])
retry_until_success(self.check_pod_status, retries=20, wait_time=3, function_args=["kube-system"])

def wait_until_exists(self, name, resource_type, ns="default"):
    """Poll via kubectl until the named resource exists in namespace *ns*."""
    query = "kubectl get %s %s -n%s" % (resource_type, name, ns)
    retry_until_success(run, function_args=[query])
Expand Down
55 changes: 49 additions & 6 deletions tests/k8st/tests/test_bgp_advert.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,9 @@ def setUp(self):
# set CALICO_ADVERTISE_CLUSTER_IPS=10.96.0.0/12
self.update_ds_env("calico-node", "kube-system", "CALICO_ADVERTISE_CLUSTER_IPS", "10.96.0.0/12")

# Enable debug logging
self.update_ds_env("calico-node", "kube-system", "BGP_LOGSEVERITYSCREEN", "debug")

# Establish BGPPeer from cluster nodes to node-extra using calicoctl
run("""kubectl exec -i -n kube-system calicoctl -- /calicoctl apply -f - << EOF
apiVersion: projectcalico.org/v3
Expand All @@ -121,11 +124,11 @@ def setUp(self):
""")

def tearDown(self):
    """
    Clean up after each test: remove the test namespace, then tear down
    the external BGP peer node.
    """
    # Delete the namespace first so service/endpoint state is withdrawn
    # while the rest of the cluster is still intact.
    self.delete_and_confirm(self.ns, "ns")
    try:
        # Best effort: the container may not exist if setUp failed partway.
        run("docker rm -f kube-node-extra")
    except subprocess.CalledProcessError:
        pass

def get_svc_cluster_ip(self, svc, ns):
    """Return the clusterIP assigned to service *svc* in namespace *ns*."""
    query = "kubectl get svc %s -n %s -o json | jq -r .spec.clusterIP" % (svc, ns)
    return run(query).strip()
Expand All @@ -152,8 +155,8 @@ def test_mainline(self):
# Create both a Local and a Cluster type NodePort service with a single replica.
local_svc = "nginx-local"
cluster_svc = "nginx-cluster"
self.create_service("nginx:1.7.9", local_svc, self.ns, 80)
self.create_service("nginx:1.7.9", cluster_svc, self.ns, 80, traffic_policy="Cluster")
self.deploy("nginx:1.7.9", local_svc, self.ns, 80)
self.deploy("nginx:1.7.9", cluster_svc, self.ns, 80, traffic_policy="Cluster")
self.wait_until_exists(local_svc, "svc", self.ns)
self.wait_until_exists(cluster_svc, "svc", self.ns)

Expand All @@ -162,8 +165,8 @@ def test_mainline(self):
cluster_svc_ip = self.get_svc_cluster_ip(cluster_svc, self.ns)

# Assert that both nginx service can be curled from the external node.
retry_until_success(curl,function_args=[local_svc_ip])
retry_until_success(curl,function_args=[cluster_svc_ip])
retry_until_success(curl, function_args=[local_svc_ip])
retry_until_success(curl, function_args=[cluster_svc_ip])

# Assert that local clusterIP is an advertised route and cluster clusterIP is not.
retry_until_success(lambda: self.assertIn(local_svc_ip, self.get_routes()))
Expand Down Expand Up @@ -194,7 +197,7 @@ def test_mainline(self):
retry_until_success(curl, function_args=[local_svc_ip])

# Connectivity to nginx-cluster will rarely succeed because it is load-balanced across all nodes.
# When the traffic hits a node that doesn't host one of the service's pod, it will be re-routed
# When the traffic hits a node that doesn't host one of the service's pod, it will be re-routed
# to another node and SNAT will cause the policy to drop the traffic.
# Try to curl 10 times.
try:
Expand All @@ -217,3 +220,43 @@ def test_mainline(self):
# Assert that clusterIP is no longer an advertised route
retry_until_success(lambda: self.assertNotIn(local_svc_ip, self.get_routes()))

def test_many_services(self):
    """
    Creates a lot of services quickly
    """
    # The whole service CIDR should already be advertised as one route.
    retry_until_success(lambda: self.assertIn("10.96.0.0/12", self.get_routes()))

    # Stand up a single local deployment plus its service.
    local_svc = "nginx-local"
    self.deploy("nginx:1.7.9", local_svc, self.ns, 80)

    # Start collecting every clusterIP we expect to see advertised.
    cluster_ips = [self.get_svc_cluster_ip(local_svc, self.ns)]

    # Fan out many additional services, all selecting the same deployment.
    num_svc = 300
    for idx in range(num_svc):
        self.create_service("nginx-svc-%s" % idx, local_svc, self.ns, 80)

    # Read back each service's assigned clusterIP.
    cluster_ips += [
        self.get_svc_cluster_ip("nginx-svc-%s" % idx, self.ns)
        for idx in range(num_svc)
    ]

    # Assert they are all advertised to the other node. This should happen
    # quickly enough that by the time we have queried all services from
    # the k8s API, they should be programmed on the remote node.
    advertised = self.get_routes()
    for ip in cluster_ips:
        self.assertIn(ip, advertised)

    # Scale to 0 replicas, assert all routes are removed.
    self.scale_deployment(local_svc, self.ns, 0)

    def check_routes_gone():
        remaining = self.get_routes()
        for ip in cluster_ips:
            self.assertNotIn(ip, remaining)

    retry_until_success(check_routes_gone, retries=10, wait_time=5)
2 changes: 1 addition & 1 deletion tests/k8st/tests/test_simple.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ class TestSimplePolicy(TestBase):
def setUp(self):
TestBase.setUp(self)
self.create_namespace("policy-demo")
self.create_service("nginx:1.7.9", "nginx", "policy-demo", 80)
self.deploy("nginx:1.7.9", "nginx", "policy-demo", 80)

# Create two client pods that live for the duration of the
# test. We will use 'kubectl exec' to try wgets from these at
Expand Down

0 comments on commit 2e6495a

Please sign in to comment.