kind.sh on macOS including Calico #18
base: master
@@ -151,11 +151,84 @@ if [ "$VERBOSE" == true ]; then
  set -o xtrace
fi

# Poll every pod in a namespace until it reports Ready, failing the run if a
# pod is still not Ready within the time budget. Callers set $namespace and
# $sleep (seconds) before invoking.
function wait_for_pods {
  for ns in "$namespace"; do
    for pod in $(kubectl get pods -n $ns | grep -v NAME | awk '{ print $1 }'); do
      counter=0
      echo kubectl get pod $pod -n $ns
      kubectl get pod $pod -n $ns
      while [[ $(kubectl get pods $pod -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}' -n $ns) != True ]]; do
        sleep 1
        let counter=counter+1

        if ((counter == $sleep)); then
          echo "POD $pod failed to start in $sleep seconds"
          kubectl get events -n $ns --sort-by='.lastTimestamp'
          echo "Exiting"

          exit 1
        fi
      done
    done
  done
}
# Install Calico in Controller...
echo Switch to controller context and Install Calico...
kubectx $PREFIX$CONTROLLER
kubectx

echo Install the Tigera Calico operator...
kubectl create -f https://projectcalico.docs.tigera.io/manifests/tigera-operator.yaml

echo Install the custom resource definitions manifest...
kubectl create -f https://projectcalico.docs.tigera.io/manifests/custom-resources.yaml
sleep 120

echo "Check for Calico namespaces, pods"
kubectl get ns
kubectl get pods -n calico-system
echo "Wait for Calico to be Running"
namespace=calico-system
sleep=900
Review comment: Sleep 900 seems excessive? If it's not going to work, could we fail faster? FWIW, in my testing it took 32 sec for Calico to be ready... so 60 or 90 might be fine?

Reply: It's taking more than 600 sec on an Amazon EC2 macOS Monterey 12.4 instance; that's why I updated it to sleep=900.
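If failing faster is the goal, one alternative, sketched here only for discussion and not part of this PR, is to let kubectl do the polling and enforce the timeout itself; the 90-second budget below is an assumption drawn from the numbers in this thread, not a measured value:

# Sketch only: use kubectl's built-in condition polling instead of a fixed sleep.
# The 90s budget is an assumption (reviewer measured ~32s locally); tune per environment.
if ! kubectl wait --namespace calico-system --for=condition=Ready pods --all --timeout=90s; then
  echo "Calico pods not Ready within 90s"
  kubectl get events -n calico-system --sort-by='.lastTimestamp'
  exit 1
fi

kubectl wait returns non-zero on timeout, so the failure path can dump events the same way wait_for_pods does.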
wait_for_pods

kubectl get pods -n calico-system

# Install Calico in Worker...
for WORKER in ${WORKERS[@]}; do

  # Install Calico in Worker...
  echo Switch to worker context and Install Calico...
  kubectx $PREFIX$WORKER
  kubectx

  echo Install the Tigera Calico operator...
  kubectl create -f https://projectcalico.docs.tigera.io/manifests/tigera-operator.yaml

  echo Install the custom resource definitions manifest...
  kubectl create -f https://projectcalico.docs.tigera.io/manifests/custom-resources.yaml
  sleep 120

  echo "Check for Calico namespaces, pods"
  kubectl get ns
  kubectl get pods -n calico-system
  echo "Wait for Calico to be Running"
  namespace=calico-system
  sleep=900
  wait_for_pods

  kubectl get pods -n calico-system

done

# Helm repo access
echo Setting up helm...
helm repo remove kubeslice
helm repo add kubeslice $REPO
helm repo update
helm repo list
helm search repo kubeslice

# Controller setup...
echo Switch to controller context and set it up...
@@ -167,7 +240,9 @@ helm install cert-manager kubeslice/cert-manager --namespace cert-manager --cre
echo "Check for cert-manager pods"
kubectl get pods -n cert-manager
echo "Wait for cert-manager to be Running"
-sleep 30
+namespace=cert-manager
+sleep=60
+wait_for_pods

kubectl get pods -n cert-manager
@@ -191,16 +266,18 @@ helm install kubeslice-controller kubeslice/kubeslice-controller -f controller-c
echo Check for status...
kubectl get pods -n kubeslice-controller
echo "Wait for kubeslice-controller-manager to be Running"
-sleep 90
+namespace=kubeslice-controller
+sleep=180
+wait_for_pods

kubectl get pods -n kubeslice-controller

echo kubectl apply -f project.yaml -n kubeslice-controller
kubectl apply -f project.yaml -n kubeslice-controller
-sleep 10
+sleep 30

echo kubectl get project -n kubeslice-avesha
kubectl get project -n kubeslice-avesha
echo kubectl get project -n kubeslice-controller
kubectl get project -n kubeslice-controller

echo kubectl get sa -n kubeslice-avesha
kubectl get sa -n kubeslice-avesha
@@ -264,8 +341,12 @@ for WORKER in ${WORKERS[@]}; do
  echo Check for status...
  kubectl get pods -n kubeslice-system
  echo "Wait for kubeslice-system to be Running"
-  sleep 60
+  namespace=kubeslice-system
+  sleep=300
+  wait_for_pods
  kubectl get pods -n kubeslice-system
+  # Iperf Namespace
+  echo Create Iperf Namespace
+  kubectl create ns iperf
done
@@ -288,12 +369,14 @@ echo kubectl apply -f $SFILE -n kubeslice-avesha
kubectl apply -f $SFILE -n kubeslice-avesha

echo "Wait for vl3(slice) and gateway pod to be Running in worker clusters"
sleep 120

echo "Final status check..."
for WORKER in ${WORKERS[@]}; do
  echo $PREFIX$WORKER
  kubectx $PREFIX$WORKER
+  namespace=kubeslice-system
+  sleep=240
+  wait_for_pods
  kubectx
  kubectl get pods -n kubeslice-system
done
@@ -307,7 +390,9 @@ kubectx

kubectl apply -f iperf-sleep.yaml -n iperf
echo "Wait for iperf to be Running"
-sleep 60
+namespace=iperf
+sleep=120
+wait_for_pods
kubectl get pods -n iperf

# Switch to kind-worker-2 context
@@ -317,7 +402,9 @@ for WORKER in ${WORKERS[@]}; do
  kubectx
  kubectl apply -f iperf-server.yaml -n iperf
  echo "Wait for iperf to be Running"
-  sleep 60
+  namespace=iperf
+  sleep=120
+  wait_for_pods
  kubectl get pods -n iperf
  fi
done
Review comment: I'm tempted to say wait_for_... should be generalized to wait for -condition- on -command- and put into the util dir so anything can use it. But we've got big changes in this file coming when Deepankar's slicectl is ready for use... so it's fine to hold off until then.
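For discussion, a minimal sketch of what that generalization could look like; the wait_for name, its calling convention, and the util-dir placement are all hypothetical here, and nothing below exists in the repo yet:

# Hypothetical generalization (not existing code): retry any command until it
# exits 0, or give up after the given number of seconds.
#   usage: wait_for <timeout-seconds> <command> [args...]
function wait_for {
  local timeout=$1
  shift
  local counter=0
  until "$@" >/dev/null 2>&1; do
    sleep 1
    counter=$((counter + 1))
    if ((counter >= timeout)); then
      echo "Timed out after ${timeout}s waiting for: $*"
      return 1
    fi
  done
}

# Example: wait up to 60s for the calico-system namespace to exist.
wait_for 60 kubectl get namespace calico-system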