diff --git a/Makefile b/Makefile index 1ecc3644e..46e1da08b 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,7 @@ VERSION_FILE=./CONTROLLER_VERSION RELEASE_VER=v$(shell $(CAT_CMD) $(VERSION_FILE)) CURRENT_DIR=$(shell pwd) GIT_BRANCH:=$(shell git symbolic-ref --short HEAD 2>&1 | grep -v fatal) +LOCAL_BUILD_ARGS ?= -race # Reset branch name if this a Travis CI environment ifneq ($(strip $(TRAVIS_BRANCH)),) GIT_BRANCH:=${TRAVIS_BRANCH} @@ -23,9 +24,10 @@ TAG:=${TAG}${RELEASE_VER} .PHONY: print-global-variables +# Build the controller executable for use in docker image build mcad-controller: init generate-code $(info Compiling controller) - CGO_ENABLED=0 GOARCH=amd64 go build -o ${BIN_DIR}/mcad-controller ./cmd/kar-controllers/ + CGO_ENABLED=0 GOOS="linux" go build -o ${BIN_DIR}/mcad-controller ./cmd/kar-controllers/ print-global-variables: $(info "---") @@ -54,7 +56,7 @@ generate-code: $(info Generating deepcopy...) ${BIN_DIR}/deepcopy-gen -i ./pkg/apis/controller/v1beta1/ -O zz_generated.deepcopy -images: verify-tag-name +images: verify-tag-name mcad-controller $(info List executable directory) $(info repo id: ${git_repository_id}) $(info branch: ${GIT_BRANCH}) @@ -62,7 +64,7 @@ images: verify-tag-name $(info Build the docker image) docker build --quiet --no-cache --tag mcad-controller:${TAG} -f ${CURRENT_DIR}/deployment/Dockerfile.both ${CURRENT_DIR}/_output/bin -images-podman: verify-tag-name +images-podman: verify-tag-name mcad-controller $(info List executable directory) $(info repo id: ${git_repository_id}) $(info branch: ${GIT_BRANCH}) @@ -106,6 +108,12 @@ mcad-controller-private: init generate-code $(info Compiling controller) CGO_ENABLED=0 GOARCH=amd64 GOPRIVATE=github.ibm.com/* go build -tags private -modfile ./private.mod -o ${BIN_DIR}/mcad-controller ./cmd/kar-controllers/ +# Build the controller executable for use on the local host and using local build args +# the default for local build args is `-race` to turn on race detection +mcad-controller-local: init 
generate-code + $(info Compiling controller) + go build ${LOCAL_BUILD_ARGS} -o ${BIN_DIR}/mcad-controller-local ./cmd/kar-controllers/ + coverage: # KUBE_COVER=y hack/make-rules/test.sh $(WHAT) $(TESTS) diff --git a/doc/build/build.md b/doc/build/build.md index 998c830ea..41a118045 100644 --- a/doc/build/build.md +++ b/doc/build/build.md @@ -13,7 +13,8 @@ To build `Multi-Cluster-App-Deployer`, a running Docker environment must be avai Clone this repo in your local environment: __Option 1__: Clone this github project to your local machine via HTTPS -``` + +```bash $ git clone https://github.com/project-codeflare/multi-cluster-app-dispatcher.git Cloning into 'multi-cluster-app-dispatcher'... Checking connectivity... done. @@ -22,90 +23,167 @@ $ ``` __Option 2__: Clone this github project to your local machine via SSH -``` + +```bash $ git clone git@github.com:project-codeflare/multi-cluster-app-dispatcher.git Cloning into 'multi-cluster-app-dispatcher'... Checking connectivity... done. Checking out files: 100% (####/####), done. $ - ``` + +### Additional software needed + +To build the controller and to run the end to end tests locally you will need to have the following software installed: + +* `Go` (version 1.16) -- the controller will compile and run with later versions, but currently supported version is 1.16 +* `kind` (version 0.11) -- later versions will work fine +* `kubectl` +* `helm` - version 3.0 or later +* `make` + +On MacOS you will need to have `readlink` executable installed (`brew install coreutils`) + ## 2. Building the Multi-Cluster-App-Deployer Controller ### Build the Executable -Run the build script `build.sh`: -``` -$ cd multi-cluster-app-dispatcher/deployment/ +From the root directory of the repository, you may build only the executable, or you can build the image directly. + +To build the executable, execute: -$ ./build.sh +```bash +#build for linux OS and for use inside docker image +multi-cluster-app-dispatcher $ make mcad-controller ... 
-+ cd .. -+ make generate-code -Compiling deepcopy-gen -Generating deepcopy +Compiling deepcopy-gen... +Generating deepcopy... go build -o _output/bin/deepcopy-gen ./cmd/deepcopy-gen/ -_output/bin/deepcopy-gen -i ./pkg/apis/controller/v1beta1/ -O zz_generated.deepcopy -+ make kar-controller +_output/bin/deepcopy-gen -i ./pkg/apis/controller/v1beta1/ -O zz_generated.deepcopy +Compiling controller +CGO_ENABLED=0 GOOS="linux" go build -o _output/bin/mcad-controller ./cmd/kar-controllers/ + +#build for local testing purposes, by default enable the race conditions detector +multi-cluster-app-dispatcher $ make mcad-controller-local +... mkdir -p _output/bin -CGO_ENABLED=0 GOARCH=amd64 go build -o _output/bin/kar-controllers ./cmd/kar-controllers/ -$ +Compiling deepcopy-gen... +Generating deepcopy... +go build -o _output/bin/deepcopy-gen ./cmd/deepcopy-gen/ +_output/bin/deepcopy-gen -i ./pkg/apis/controller/v1beta1/ -O zz_generated.deepcopy +Compiling controller +go build -race -o _output/bin/mcad-controller-local ./cmd/kar-controllers/ ``` -Ensure the executables: `deepcopy-gen`, `mcad-controllers` are created in the target output directory: -``` -$ ls ../_output/bin/ -deepcopy-gen mcad-controller -$ +Ensure the executables: `deepcopy-gen` and `mcad-controllers` are created in the target output directory: + +```bash +multi-cluster-app-dispatcher $ ls _output/bin +deepcopy-gen mcad-controller ``` ### Build the Multi-Cluster-App-Dispatcher Image -Run the image build script `image.sh`: +If you want to run the end to end tests locally, you will need to have the docker daemon running on your workstation, and build the image using docker. Images can also be build using podman for deployment of the MCAD controller on remote clusters. -``` -$ ./image.sh -... -+ make images -Changed to executable directory -Build the docker image -cd ./_output/bin -docker build --no-cache --tag mcad-controller:v1.14 ... 
-Sending build context to Docker daemon 122.7MB -Step 1/7 : From ubuntu:18.04 - ---> ea4c82dcd15a -Step 2/7 : ADD mcad-controller /usr/local/bin - ---> 674cefbce55a -... - ---> 911c7c82b5ee -Step 7/7 : WORKDIR /usr/local/bin - ---> Running in f2db4649e7a6 -Removing intermediate container f2db4649e7a6 - ---> 1dbf126976cf -Successfully built 1dbf126976cf -Successfully tagged mcad-controller:v1.14 -$ -``` +From the root directory of the repository: -Note the *image name* and *image tag* from the image build script (`./image.sh`) above. For example the *image name* and *image tag* built after running the example above is `mcad-controller:v1.14`. List the Docker images to ensure the image exists. +```bash +# With docker daemon running +multi-cluster-app-dispatcher % make images +.... + +# output from a local branch, MacOS build, local file names replaced with XXXXXXXXXX +"---" +"MAKE GLOBAL VARIABLES:" +" "BIN_DIR="_output/bin" +" "GIT_BRANCH="local_e2e_test" +" "RELEASE_VER="v1.29.55" +" "TAG="local_e2e_test-v1.29.55" +"---" +# Check for invalid tag name +t=local_e2e_test-v1.29.55 && [ ${#t} -le 128 ] || { echo "Target name $t has 128 or more chars"; false; } +mkdir -p _output/bin +Compiling deepcopy-gen... +Generating deepcopy... 
+go build -o _output/bin/deepcopy-gen ./cmd/deepcopy-gen/ +_output/bin/deepcopy-gen -i ./pkg/apis/controller/v1beta1/ -O zz_generated.deepcopy +Compiling controller +CGO_ENABLED=0 GOOS="linux" go build -o _output/bin/mcad-controller ./cmd/kar-controllers/ +List executable directory +repo id: +branch: local_e2e_test +Build the docker image +ls -l XXXXXXXXXXXX/multi-cluster-app-dispatcher/_output/bin +total 268768 +-rwxr-xr-x 1 XXXXX staff 8238498 Apr 4 12:46 deepcopy-gen +-rwxr-xr-x 1 XXXXX staff 57584808 Apr 4 12:47 mcad-controller +docker build --quiet --no-cache --tag mcad-controller:local_e2e_test-v1.29.55 -f XXXXXXXX/multi-cluster-app-dispatcher/deployment/Dockerfile.both XXXXXX/multi-cluster-app-dispatcher/_output/bin +sha256:3b4f314b06674f6b52d6a5d77ad1d3d9cebf8fa94a9f80026b02813689c3289d + +#Using podman +make images-podman + +.... + +# output from a local branch, MacOS build, local file names replaced with XXXXXXXXXX +"---" +"MAKE GLOBAL VARIABLES:" +" "BIN_DIR="_output/bin" +" "GIT_BRANCH="local_e2e_test" +" "RELEASE_VER="v1.29.55" +" "TAG="local_e2e_test-v1.29.55" +"---" +# Check for invalid tag name +t=local_e2e_test-v1.29.55 && [ ${#t} -le 128 ] || { echo "Target name $t has 128 or more chars"; false; } +mkdir -p _output/bin +Compiling deepcopy-gen... +Generating deepcopy... 
+go build -o _output/bin/deepcopy-gen ./cmd/deepcopy-gen/ +_output/bin/deepcopy-gen -i ./pkg/apis/controller/v1beta1/ -O zz_generated.deepcopy +Compiling controller +CGO_ENABLED=0 GOOS="linux" go build -o _output/bin/mcad-controller ./cmd/kar-controllers/ +List executable directory +repo id: +branch: local_e2e_test +Build the docker image +ls -l XXXXXXXXXX/multi-cluster-app-dispatcher/_output/bin +total 128568 +-rwxr-xr-x 1 XXXXXXXX staff 8238498 Apr 4 12:53 deepcopy-gen +-rwxr-xr-x 1 XXXXXXXX staff 57584808 Apr 4 12:53 mcad-controller +podman build --quiet --no-cache --tag mcad-controller:local_e2e_test-v1.29.55 -f XXXXXXXXXX/multi-cluster-app-dispatcher/deployment/Dockerfile.both XXXXXXXXXX/multi-cluster-app-dispatcher/_output/bin +7553c702e5238920f44cba7303d1ff111aca1722e7e3ed4d49afbafa165fc3e3 ``` -$ docker images mcad-controller -REPOSITORY TAG IMAGE ID CREATED SIZE -mcad-controller v.1.14 1dbf126976cf 11 minutes ago 272MB -$ -``` + ### Push the Multi-Cluster-App-Dispatcher Image to an Image Repository -The following example assumes an available `/mcad-controller` on [Docker Hub](https://hub.docker.com) -``` -$ docker login -$ docker push /mcad-controller:v1.14 + +The following example assumes an available `/mcad-controller` on [Docker Hub](https://hub.docker.com) and using image version `v1.14` + +```bash +docker login +docker push /mcad-controller:v1.14 ``` The same can be done with [Quay](quay.io) -``` -$ docker login quay.io -$ docker push /mcad-controller:v1.14 + +```bash +docker login quay.io +docker push /mcad-controller:v1.14 ``` Refer to [deployment](../deploy/deployment.md) on how to deploy the `multi-cluster-app-dispatcher` as a controller in Kubernetes. + +## 3. Running e2e tests locally + +When running e2e tests, is recommended you restrict the `docker` daemon [cpu and memory resources](https://docs.docker.com/config/containers/resource_constraints/). 
The recommended settings are: + +* CPU: 2 +* Memory: 8 GB + +From the root directory of the repository: + +```bash +# With docker daemon running +multi-cluster-app-dispatcher % make run-e2e +``` diff --git a/hack/e2e-kind-config.yaml b/hack/e2e-kind-config.yaml index 29f05a216..767727f01 100644 --- a/hack/e2e-kind-config.yaml +++ b/hack/e2e-kind-config.yaml @@ -1,13 +1,13 @@ # this config file contains all config fields with comments kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 -# 1 control plane node and 3 workers +# 1 control plane node and 1 worker nodes: # the control plane node config - role: control-plane # kubernetes version 1.20.7 from kind v0.11.0 image: kindest/node:v1.20.7@sha256:e645428988191fc824529fd0bb5c94244c12401cf5f5ea3bd875eb0a787f0fe9 - # the three workers + # the worker - role: worker # kubernetes version 1.20.7 from kind v0.11.0 image: kindest/node:v1.20.7@sha256:e645428988191fc824529fd0bb5c94244c12401cf5f5ea3bd875eb0a787f0fe9 diff --git a/hack/run-e2e-kind.sh b/hack/run-e2e-kind.sh index 749fb495a..2fbc101f4 100755 --- a/hack/run-e2e-kind.sh +++ b/hack/run-e2e-kind.sh @@ -27,13 +27,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -export ROOT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.. 
+export ROOT_DIR="$(dirname "$(dirname "$(readlink -fn "$0")")")" export LOG_LEVEL=3 -export CLEANUP_CLUSTER=${CLEANUP_CLUSTER:-1} +export CLEANUP_CLUSTER=${CLEANUP_CLUSTER:-"true"} export CLUSTER_CONTEXT="--name test" # Using older image due to older version of kubernetes cluster" -export IMAGE_NGINX="nginx:1.15.12" -export IMAGE_ECHOSERVER="k8s.gcr.io/echoserver:1.4" +export IMAGE_ECHOSERVER="kicbase/echo-server:1.0" +export IMAGE_UBUNTU_LATEST="ubuntu:latest" export KIND_OPT=${KIND_OPT:=" --config ${ROOT_DIR}/hack/e2e-kind-config.yaml"} export KA_BIN=_output/bin export WAIT_TIME="20s" @@ -41,23 +41,32 @@ export IMAGE_REPOSITORY_MCAD="${1}" export IMAGE_TAG_MCAD="${2}" export MCAD_IMAGE_PULL_POLICY="${3-Always}" export IMAGE_MCAD="${IMAGE_REPOSITORY_MCAD}:${IMAGE_TAG_MCAD}" +CLUSTER_STARTED="false" + +function update_test_host { + sudo apt-get update && sudo apt-get install -y apt-transport-https curl + curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - + echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list + sudo apt-get update + # Using older version due to older version of kubernetes cluster" + sudo apt-get install -y --allow-unauthenticated kubectl=1.17.0-00 + + # Download kind binary (0.6.1) + sudo curl -o /usr/local/bin/kind -L https://github.com/kubernetes-sigs/kind/releases/download/v0.11.0/kind-linux-amd64 + sudo chmod +x /usr/local/bin/kind + + # Installing helm3 + curl -fsSL -o ${ROOT_DIR}/get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 + chmod 700 ${ROOT_DIR}/get_helm.sh + ${ROOT_DIR}/get_helm.sh + sleep 10 +} -sudo apt-get update && sudo apt-get install -y apt-transport-https -curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - -echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list -sudo apt-get update -# Using older version due to 
older version of kubernetes cluster" -sudo apt-get install -y --allow-unauthenticated kubectl=1.17.0-00 - -# Download kind binary (0.6.1) -sudo curl -o /usr/local/bin/kind -L https://github.com/kubernetes-sigs/kind/releases/download/v0.11.0/kind-linux-amd64 -sudo chmod +x /usr/local/bin/kind - -# check if kind installed +# check if prerequisites are installed. function check-prerequisites { echo "checking prerequisites" which kind >/dev/null 2>&1 - if [[ $? -ne 0 ]]; then + if [ $? -ne 0 ]; then echo "kind not installed, exiting." exit 1 else @@ -65,11 +74,11 @@ function check-prerequisites { fi which kubectl >/dev/null 2>&1 - if [[ $? -ne 0 ]]; then + if [ $? -ne 0 ]; then echo "kubectl not installed, exiting." exit 1 else - echo -n "found kubectl, " && kubectl version --short --client + echo -n "found kubectl, " && kubectl version fi if [[ $IMAGE_REPOSITORY_MCAD == "" ]] @@ -81,34 +90,75 @@ function check-prerequisites { echo "No MCAD image tag was provided for: ${IMAGE_REPOSITORY_MCAD}." exit 1 else - echo -n "end to end test with ${IMAGE_MCAD}." + echo "end to end test with ${IMAGE_MCAD}." fi + + which helm >/dev/null 2>&1 + if [ $? -ne 0 ] + then + echo "helm not installed, exiting." + exit 1 + else + echo -n "found helm, " && helm version --short + fi + } function kind-up-cluster { - check-prerequisites echo "Running kind: [kind create cluster ${CLUSTER_CONTEXT} ${KIND_OPT}]" kind create cluster ${CLUSTER_CONTEXT} ${KIND_OPT} --wait ${WAIT_TIME} + if [ $? -ne 0 ] + then + echo "Failed to start kind cluster" + exit 1 + fi + CLUSTER_STARTED="true" + + docker pull ${IMAGE_ECHOSERVER} + if [ $? -ne 0 ] + then + echo "Failed to pull ${IMAGE_ECHOSERVER}" + exit 1 + fi + + docker pull ${IMAGE_UBUNTU_LATEST} + if [ $? -ne 0 ] + then + echo "Failed to pull ${IMAGE_UBUNTU_LATEST}" + exit 1 + fi - docker images - docker pull ${IMAGE_ECHOSERVER} - docker pull ${IMAGE_NGINX} if [[ "$MCAD_IMAGE_PULL_POLICY" = "Always" ]] then docker pull ${IMAGE_MCAD} + if [ $? 
-ne 0 ] + then + echo "Failed to pull ${IMAGE_MCAD}" + exit 1 + fi fi docker images - - kind load docker-image ${IMAGE_NGINX} ${CLUSTER_CONTEXT} - kind load docker-image ${IMAGE_ECHOSERVER} ${CLUSTER_CONTEXT} - kind load docker-image ${IMAGE_MCAD} ${CLUSTER_CONTEXT} + + for image in ${IMAGE_ECHOSERVER} ${IMAGE_UBUNTU_LATEST} ${IMAGE_MCAD} + do + kind load docker-image ${image} ${CLUSTER_CONTEXT} + if [ $? -ne 0 ] + then + echo "Failed to load image ${image} in cluster" + exit 1 + fi + done } # clean up function cleanup { echo "==========================>>>>> Cleaning up... <<<<<==========================" echo " " - + if [[ ${CLUSTER_STARTED} == "false" ]] + then + echo "Cluster was not started, nothing more to do." + return + fi echo "Custom Resource Definitions..." echo "kubectl get crds" @@ -145,10 +195,19 @@ function cleanup { echo "====================================================================================" echo "==========================>>>>> MCAD Controller Logs <<<<<==========================" echo "====================================================================================" - echo "kubectl logs ${mcad_pod} -n kube-system" - kubectl logs ${mcad_pod} -n kube-system - - kind delete cluster ${CLUSTER_CONTEXT} + local mcad_pod=$(kubectl get pods -n kube-system | grep mcad-controller | awk '{print $1}') + if [[ "$mcad_pod" != "" ]] + then + echo "kubectl logs ${mcad_pod} -n kube-system" + kubectl logs ${mcad_pod} -n kube-system + fi + rm -rf ${ROOT_DIR}/get_helm.sh + if [[ $CLEANUP_CLUSTER == "true" ]] + then + kind delete cluster ${CLUSTER_CONTEXT} + else + echo "Cluster requested to stay up, not deleting cluster" + fi } debug_function() { @@ -193,7 +252,7 @@ spec: spec: containers: - name: hellodiana-2-test-0 - image: k8s.gcr.io/echoserver:1.4 + image: ${IMAGE_ECHOSERVER} imagePullPolicy: Always ports: - containerPort: 80 @@ -249,13 +308,12 @@ EOF } function kube-test-env-up { - cd ${ROOT_DIR} - - echo "---" - export KUBECONFIG="$(kind get 
kubeconfig-path ${CLUSTER_CONTEXT})" - - echo "---" - echo "KUBECONFIG file: ${KUBECONFIG}" + # Hack to setup for 'go test' call which expects this path. + if [ ! -f $HOME/.kube/config ] + then + echo "'$HOME/.kube/config' not found" + exit 1 + fi echo "---" echo "kubectl version" @@ -269,44 +327,14 @@ function kube-test-env-up { echo "kubectl get nodes" kubectl get nodes -o wide - # Hack to setup for 'go test' call which expects this path. - if [ ! -z $HOME/.kube/config ] - then - cp $KUBECONFIG $HOME/.kube/config - - echo "---" - cat $HOME/.kube/config - fi - - # Installing helm3 - - curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 - chmod 700 get_helm.sh - ./get_helm.sh - sleep 10 - - helm version - echo "Installing Podgroup CRD" kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/scheduler-plugins/277b6bdec18f8a9e9ccd1bfeaf4b66495bfc6f92/config/crd/bases/scheduling.sigs.k8s.io_podgroups.yaml - cd deployment/mcad-controller - # start mcad controller echo "Starting MCAD Controller..." echo "helm install mcad-controller namespace kube-system wait set loglevel=2 set resources.requests.cpu=1000m set resources.requests.memory=1024Mi set resources.limits.cpu=4000m set resources.limits.memory=4096Mi set image.repository=$IMAGE_REPOSITORY_MCAD set image.tag=$IMAGE_TAG_MCAD set image.pullPolicy=$MCAD_IMAGE_PULL_POLICY" - helm upgrade --install mcad-controller . 
--namespace kube-system --wait --set loglevel=2 --set resources.requests.cpu=1000m --set resources.requests.memory=1024Mi --set resources.limits.cpu=4000m --set resources.limits.memory=4096Mi --set configMap.name=mcad-controller-configmap --set configMap.podCreationTimeout='"120000"' --set configMap.quotaEnabled='"false"' --set coscheduler.rbac.apiGroup=scheduling.sigs.k8s.io --set coscheduler.rbac.resource=podgroups --set image.repository=$IMAGE_REPOSITORY_MCAD --set image.tag=$IMAGE_TAG_MCAD --set image.pullPolicy=$MCAD_IMAGE_PULL_POLICY - - sleep 10 - echo "Listing MCAD Controller Helm Chart and Pod YAML..." - helm list - mcad_pod=$(kubectl get pods -n kube-system | grep mcad-controller | awk '{print $1}') - if [[ "$mcad_pod" != "" ]] - then - kubectl get pod ${mcad_pod} -n kube-system -o yaml - fi - + helm upgrade --install mcad-controller ${ROOT_DIR}/deployment/mcad-controller --namespace kube-system --wait --set loglevel=2 --set resources.requests.cpu=1000m --set resources.requests.memory=1024Mi --set resources.limits.cpu=4000m --set resources.limits.memory=4096Mi --set configMap.name=mcad-controller-configmap --set configMap.podCreationTimeout='"120000"' --set configMap.quotaEnabled='"false"' --set coscheduler.rbac.apiGroup=scheduling.sigs.k8s.io --set coscheduler.rbac.resource=podgroups --set image.repository=$IMAGE_REPOSITORY_MCAD --set image.tag=$IMAGE_TAG_MCAD --set image.pullPolicy=$MCAD_IMAGE_PULL_POLICY sleep 10 echo "Listing MCAD Controller Helm Chart and Pod YAML..." @@ -344,11 +372,17 @@ function kube-test-env-up { trap cleanup EXIT +#Only run this function if we are running on the test build machine, +#currently ubuntu 16.04 xenial +if [ "$(lsb_release -c -s 2>&1 | grep xenial)" == "xenial" ]; then + update_test_host +fi + +check-prerequisites + kind-up-cluster kube-test-env-up -cd ${ROOT_DIR} - echo "==========================>>>>> Running E2E tests... 
<<<<<==========================" -go test ./test/e2e -v -timeout 55m +go test ./test/e2e -v -timeout 75m \ No newline at end of file diff --git a/test/e2e/queue.go b/test/e2e/queue.go index 4f8010049..b99441861 100644 --- a/test/e2e/queue.go +++ b/test/e2e/queue.go @@ -155,7 +155,7 @@ var _ = Describe("AppWrapper E2E Test", func() { aw := createJobAWWithInitContainer(context, "aw-job-3-init-container", 60, "exponential", 0) appwrappers = append(appwrappers, aw) - err := waitAWPodsCompleted(context, aw, 720 * time.Second) // This test waits for 10 minutes to make sure all PODs complete + err := waitAWPodsCompleted(context, aw, 720*time.Second) // This test waits for 10 minutes to make sure all PODs complete Expect(err).NotTo(HaveOccurred()) }) @@ -173,7 +173,7 @@ var _ = Describe("AppWrapper E2E Test", func() { aw := createJobAWWithInitContainer(context, "aw-job-3-init-container", 1, "none", 3) appwrappers = append(appwrappers, aw) - err := waitAWPodsCompleted(context, aw, 300 * time.Second) + err := waitAWPodsCompleted(context, aw, 300*time.Second) Expect(err).To(HaveOccurred()) }) @@ -615,7 +615,7 @@ var _ = Describe("AppWrapper E2E Test", func() { aw := createGenericJobAWWithScheduleSpec(context, "aw-test-job-with-scheduling-spec") err1 := waitAWPodsReady(context, aw) Expect(err1).NotTo(HaveOccurred()) - err2 := waitAWPodsCompleted(context, aw, 90 * time.Second) + err2 := waitAWPodsCompleted(context, aw, 90*time.Second) Expect(err2).NotTo(HaveOccurred()) // Once pods are completed, we wait for them to see if they change their status to anything BUT "Completed" @@ -733,8 +733,8 @@ var _ = Describe("AppWrapper E2E Test", func() { context := initTestContext() var aws []*arbv1.AppWrapper - //appwrappersPtr := &aws - //defer cleanupTestObjectsPtr(context, appwrappersPtr) + // appwrappersPtr := &aws + // defer cleanupTestObjectsPtr(context, appwrappersPtr) const ( awCount = 100 diff --git a/test/e2e/util.go b/test/e2e/util.go index cd2dad5c9..5187413d4 100644 --- 
a/test/e2e/util.go +++ b/test/e2e/util.go @@ -740,7 +740,7 @@ func waitAWPodsReadyEx(ctx *context, aw *arbv1.AppWrapper, taskNum int, quite bo []v1.PodPhase{v1.PodRunning, v1.PodSucceeded}, taskNum, quite)) } -func waitAWPodsCompletedEx(ctx *context, aw *arbv1.AppWrapper, taskNum int, quite bool, timeout time.Duration ) error { +func waitAWPodsCompletedEx(ctx *context, aw *arbv1.AppWrapper, taskNum int, quite bool, timeout time.Duration) error { return wait.Poll(100*time.Millisecond, timeout, awPodPhase(ctx, aw, []v1.PodPhase{v1.PodSucceeded}, taskNum, quite)) } @@ -831,7 +831,7 @@ func createReplicaSet(context *context, name string, rep int32, img string, req return deployment } -func createJobAWWithInitContainer(context *context, name string, requeuingTimeInSeconds int, requeuingGrowthType string, requeuingMaxNumRequeuings int ) *arbv1.AppWrapper { +func createJobAWWithInitContainer(context *context, name string, requeuingTimeInSeconds int, requeuingGrowthType string, requeuingMaxNumRequeuings int) *arbv1.AppWrapper { rb := []byte(`{"apiVersion": "batch/v1", "kind": "Job", "metadata": { @@ -894,8 +894,8 @@ func createJobAWWithInitContainer(context *context, name string, requeuingTimeIn SchedSpec: arbv1.SchedulingSpecTemplate{ MinAvailable: minAvailable, Requeuing: arbv1.RequeuingTemplate{ - TimeInSeconds: requeuingTimeInSeconds, - GrowthType: requeuingGrowthType, + TimeInSeconds: requeuingTimeInSeconds, + GrowthType: requeuingGrowthType, MaxNumRequeuings: requeuingMaxNumRequeuings, }, }, @@ -953,7 +953,7 @@ func createDeploymentAW(context *context, name string) *arbv1.AppWrapper { "containers": [ { "name": "aw-deployment-3", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "ports": [ { "containerPort": 80 @@ -1029,7 +1029,7 @@ func createDeploymentAWwith900CPU(context *context, name string) *arbv1.AppWrapp "containers": [ { "name": "aw-deployment-2-900cpu", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", 
"resources": { "requests": { "cpu": "900m" @@ -1110,7 +1110,7 @@ func createDeploymentAWwith550CPU(context *context, name string) *arbv1.AppWrapp "containers": [ { "name": "aw-deployment-2-550cpu", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "resources": { "requests": { "cpu": "550m" @@ -1191,7 +1191,7 @@ func createDeploymentAWwith125CPU(context *context, name string) *arbv1.AppWrapp "containers": [ { "name": "aw-deployment-2-125cpu", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "resources": { "requests": { "cpu": "125m" @@ -1272,7 +1272,7 @@ func createDeploymentAWwith126CPU(context *context, name string) *arbv1.AppWrapp "containers": [ { "name": "aw-deployment-2-126cpu", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "resources": { "requests": { "cpu": "126m" @@ -1353,7 +1353,7 @@ func createDeploymentAWwith350CPU(context *context, name string) *arbv1.AppWrapp "containers": [ { "name": "aw-deployment-2-350cpu", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "resources": { "requests": { "cpu": "350m" @@ -1434,7 +1434,7 @@ func createDeploymentAWwith351CPU(context *context, name string) *arbv1.AppWrapp "containers": [ { "name": "aw-deployment-2-351cpu", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "resources": { "requests": { "cpu": "351m" @@ -1515,7 +1515,7 @@ func createDeploymentAWwith426CPU(context *context, name string) *arbv1.AppWrapp "containers": [ { "name": "aw-deployment-2-426cpu", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "resources": { "requests": { "cpu": "426m" @@ -1596,7 +1596,7 @@ func createDeploymentAWwith425CPU(context *context, name string) *arbv1.AppWrapp "containers": [ { "name": "aw-deployment-2-425cpu", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "resources": { "requests": { "cpu": "425m" @@ -1677,7 +1677,7 @@ 
func createGenericDeploymentAW(context *context, name string) *arbv1.AppWrapper "containers": [ { "name": "aw-generic-deployment-3", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "ports": [ { "containerPort": 80 @@ -2264,7 +2264,7 @@ func createGenericDeploymentAWWithMultipleItems(context *context, name string) * "containers": [ { "name": "aw-deployment-2-status", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "ports": [ { "containerPort": 80 @@ -2305,7 +2305,7 @@ func createGenericDeploymentAWWithMultipleItems(context *context, name string) * "containers": [ { "name": "aw-deployment-3-status", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "ports": [ { "containerPort": 80 @@ -2392,7 +2392,7 @@ func createGenericDeploymentAWWithService(context *context, name string) *arbv1. "containers": [ { "name": "aw-deployment-3-status", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "ports": [ { "containerPort": 80 @@ -2515,7 +2515,7 @@ func createGenericDeploymentWithCPUAW(context *context, name string, cpuDemand s "containers": [ { "name": "%s", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "resources": { "requests": { "cpu": "%s" @@ -2597,7 +2597,7 @@ func createGenericDeploymentCustomPodResourcesWithCPUAW(context *context, name s "containers": [ { "name": "%s", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "resources": { "requests": { "cpu": "%s" @@ -2764,7 +2764,7 @@ func createStatefulSetAW(context *context, name string) *arbv1.AppWrapper { "containers": [ { "name": "aw-statefulset-2", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "imagePullPolicy": "Never", "ports": [ { @@ -2841,7 +2841,7 @@ func createGenericStatefulSetAW(context *context, name string) *arbv1.AppWrapper "containers": [ { "name": "aw-generic-statefulset-2", - "image": 
"k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "imagePullPolicy": "Never", "ports": [ { @@ -2886,9 +2886,11 @@ func createGenericStatefulSetAW(context *context, name string) *arbv1.AppWrapper return appwrapper } -//NOTE: Recommend this test not to be the last test in the test suite it may pass -// may pass the local test but may cause controller to fail which is not -// part of this test's validation. +// NOTE: +// +// Recommend this test not to be the last test in the test suite it may pass +// may pass the local test but may cause controller to fail which is not +// part of this test's validation. func createBadPodTemplateAW(context *context, name string) *arbv1.AppWrapper { rb := []byte(`{"apiVersion": "v1", "kind": "Pod", @@ -2904,7 +2906,7 @@ func createBadPodTemplateAW(context *context, name string) *arbv1.AppWrapper { "containers": [ { "name": "aw-bad-podtemplate-2", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "ports": [ { "containerPort": 80 @@ -2971,7 +2973,7 @@ func createPodTemplateAW(context *context, name string) *arbv1.AppWrapper { "containers": [ { "name": "aw-podtemplate-2", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "ports": [ { "containerPort": 80 @@ -3038,7 +3040,7 @@ func createPodCheckFailedStatusAW(context *context, name string) *arbv1.AppWrapp "containers": [ { "name": "aw-checkfailedstatus-1", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "ports": [ { "containerPort": 80 @@ -3109,7 +3111,7 @@ func createGenericPodAWCustomDemand(context *context, name string, cpuDemand str "containers": [ { "name": "%s", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "resources": { "limits": { "cpu": "%s" @@ -3183,7 +3185,7 @@ func createGenericPodAW(context *context, name string) *arbv1.AppWrapper { "containers": [ { "name": "aw-generic-pod-1", - "image": "k8s.gcr.io/echoserver:1.4", + "image": 
"kicbase/echo-server:1.0", "resources": { "limits": { "memory": "150Mi" @@ -3256,7 +3258,7 @@ func createGenericPodTooBigAW(context *context, name string) *arbv1.AppWrapper { "containers": [ { "name": "aw-generic-big-pod-1", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "resources": { "limits": { "cpu": "100", @@ -3329,7 +3331,7 @@ func createBadGenericPodAW(context *context, name string) *arbv1.AppWrapper { "containers": [ { "name": "aw-bad-generic-pod-1", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "ports": [ { "containerPort": 80 @@ -3429,7 +3431,7 @@ func createBadGenericPodTemplateAW(context *context, name string) (*arbv1.AppWra "containers": [ { "name": "aw-generic-podtemplate-2", - "image": "k8s.gcr.io/echoserver:1.4", + "image": "kicbase/echo-server:1.0", "ports": [ { "containerPort": 80 diff --git a/test/yaml/0001-aw-generic-deployment-3.yaml b/test/yaml/0001-aw-generic-deployment-3.yaml index d2ff3a10d..5b4038841 100644 --- a/test/yaml/0001-aw-generic-deployment-3.yaml +++ b/test/yaml/0001-aw-generic-deployment-3.yaml @@ -25,7 +25,7 @@ spec: spec: containers: - name: 0001-aw-generic-deployment-3 - image: k8s.gcr.io/echoserver:1.4 + image: kicbase/echo-server:1.0 ports: - containerPort: 80 resources: