From b3e6178026802ba59514a5698411c43549f5f2cb Mon Sep 17 00:00:00 2001
From: Vasyl Yurkovych <59879559+yurkovychv@users.noreply.github.com>
Date: Wed, 12 Feb 2025 11:49:43 +0200
Subject: [PATCH] PMM-13734 helm tests (#767)

* PMM-13734 helm tests
* PMM-13734 update workdir
* PMM-13734 update branch
* PMM-13734 update branch
* PMM-13734 update branch
* PMM-13734 add setup scripts
* PMM-13734 add setup scripts
* PMM-13734 update workflow
* PMM-13734 bring back teardown
* PMM-13734 tweak
* Update helm-tests.yml
* PMM-13734 update test names
---
 .github/workflows/helm-tests.yml | 113 ++++++++++++++
 k8s/helm-test.bats               | 251 +++++++++++++++++++++++++++++++
 k8s/k8s_helper.sh                |  44 ++++++
 k8s/pmm_helper.sh                |  35 +++++
 k8s/setup_bats_libs.sh           |  23 +++
 5 files changed, 466 insertions(+)
 create mode 100644 .github/workflows/helm-tests.yml
 create mode 100644 k8s/helm-test.bats
 create mode 100644 k8s/k8s_helper.sh
 create mode 100644 k8s/pmm_helper.sh
 create mode 100755 k8s/setup_bats_libs.sh

diff --git a/.github/workflows/helm-tests.yml b/.github/workflows/helm-tests.yml
new file mode 100644
index 00000000..2bbaed01
--- /dev/null
+++ b/.github/workflows/helm-tests.yml
@@ -0,0 +1,113 @@
+name: Helm tests
+
+on:
+  schedule:
+    - cron: '0 0 * * *'
+  workflow_dispatch:
+    inputs:
+      server_image:
+        description: "server image: repo/name:tag"
+        default: "perconalab/pmm-server:3-dev-latest"
+        required: true
+        type: string
+      client_image:
+        description: "client image: repo/name:tag"
+        default: "perconalab/pmm-client:3-dev-latest"
+        required: true
+        type: string
+      pmm_qa_branch:
+        description: "Branch for pmm-qa to checkout"
+        default: "v3"
+        required: false
+        type: string
+      sha:
+        description: "commit sha to report status"
+        required: false
+        type: string
+
+  workflow_call:
+    inputs:
+      server_image:
+        required: true
+        type: string
+      client_image:
+        required: true
+        type: string
+      pmm_qa_branch:
+        required: false
+        type: string
+      sha:
+        required: false
+        type: string
+
+jobs:
+  helm-tests:
+    runs-on: ubuntu-latest
+    env:
+      WORK_DIR: pmm-qa/k8s
+      GH_API_TOKEN: ${{ secrets.GH_API_TOKEN }}
+      PMM_QA_BRANCH: ${{ inputs.pmm_qa_branch || 'v3' }}
+      SERVER_IMAGE: ${{ inputs.server_image || 'perconalab/pmm-server:3-dev-latest' }}
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ env.PMM_QA_BRANCH }}
+          repository: percona/pmm-qa
+          path: ./pmm-qa
+
+      - name: Set up bats globally
+        run: |
+          git clone https://github.com/bats-core/bats-core.git /opt/bats
+          sudo /opt/bats/install.sh /usr/local
+
+      - name: Set up bats libraries
+        working-directory: ${{ env.WORK_DIR }}
+        run: |
+          ./setup_bats_libs.sh
+          echo "BATS_LIB_PATH=$(pwd)/lib" >> $GITHUB_ENV
+
+      - name: Start minikube
+        run: |
+          minikube start
+          minikube addons disable storage-provisioner
+
+          ### Install CSI drivers for snapshots
+          kubectl delete storageclass standard
+          minikube addons enable csi-hostpath-driver
+          minikube addons enable volumesnapshots
+          kubectl patch storageclass csi-hostpath-sc -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
+          kubectl wait --for=condition=Ready node --timeout=90s minikube
+
+      - name: Run helm tests
+        working-directory: ${{ env.WORK_DIR }}
+        run: |
+          echo $(git submodule status)
+
+          export IMAGE_REPO=$(echo $SERVER_IMAGE | cut -d ':' -f 1)
+          export IMAGE_TAG=$(echo $SERVER_IMAGE | cut -d ':' -f 2)
+          bats --tap helm-test.bats
+
+      - name: Create status check
+        if: ${{ always() && inputs.sha }}
+        continue-on-error: true
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          REPO: ${{ github.repository }}
+          RUN_ID: ${{ github.run_id }}
+          SHA: ${{ inputs.sha }}
+          STATUS: ${{ job.status }}
+        run: |
+          if [ "${STATUS}" = "cancelled" ]; then
+            STATUS="error"
+          fi
+
+          gh api \
+            --method POST \
+            -H "Accept: application/vnd.github.v3+json" \
+            /repos/$REPO/statuses/$SHA \
+            -f state="$STATUS" \
+            -f target_url="https://github.com/$REPO/actions/runs/$RUN_ID" \
+            -f description="Helm Tests status: $STATUS" \
+            -f context='actions/workflows/helm-tests'
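Once merged, the dispatch path above can also be exercised by hand. A minimal sketch using the GitHub CLI, assuming it is authenticated against this repository and that the workflow file lives on the v3 branch (the input values shown are just the documented defaults):

    gh workflow run helm-tests.yml \
      --ref v3 \
      -f server_image=perconalab/pmm-server:3-dev-latest \
      -f client_image=perconalab/pmm-client:3-dev-latest \
      -f pmm_qa_branch=v3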
diff --git a/k8s/helm-test.bats b/k8s/helm-test.bats
new file mode 100644
index 00000000..7257a52e
--- /dev/null
+++ b/k8s/helm-test.bats
@@ -0,0 +1,251 @@
+## pmm k8s helm tests
+### needs: helm, kubectl, a k8s cluster with a snapshot class, and a default kubeconfig
+## add the comment '#bats test_tags=bats:focus' above a test to focus it
+
+# minikube delete && \
+# minikube start && \
+# minikube addons disable storage-provisioner && \
+# kubectl delete storageclass standard && \
+# minikube addons enable csi-hostpath-driver && \
+# minikube addons enable volumesnapshots && \
+# kubectl patch storageclass csi-hostpath-sc -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' && \
+# kubectl wait --for=condition=Ready node --timeout=90s minikube && \
+# bats helm-test.bats
+
+cleanup () {
+    echo "--------cleanup---------"
+    helm list --short | xargs helm uninstall || true
+    kubectl delete pod,service,statefulset,configmap,secret,serviceaccount,volumesnapshot --selector=app.kubernetes.io/name=pmm --force || true
+    delete_pvc || true
+    rm values.yaml || true
+    echo "------------------------"
+}
+
+setup() {
+    echo "Running setup"
+    PROJECT_ROOT=$(dirname "$BATS_TEST_FILENAME")
+    echo "Project root: $PROJECT_ROOT"
+    source "$PROJECT_ROOT/k8s_helper.sh"
+    source "$PROJECT_ROOT/pmm_helper.sh"
+    IMAGE_REPO=${IMAGE_REPO:-"perconalab/pmm-server"}
+    IMAGE_TAG=${IMAGE_TAG:-"3-dev-latest"}
+    RELEASE_REPO="percona/pmm-server"
+    RELEASE_TAG="3"
+
+    cleanup
+}
+
+teardown() {
+    echo "Running teardown"
+    echo "-------debug info-------"
+    kubectl get pods
+    kubectl describe pod --selector=app.kubernetes.io/name=pmm
+    kubectl get events --sort-by=lastTimestamp
+    kubectl logs --all-containers --timestamps --selector=app.kubernetes.io/name=pmm
+    echo "------------------------"
+
+    cleanup
+}
+
+# Helper function to trim whitespace
+trim() {
+    local var="$*"
+    # remove leading whitespace characters
+    var="${var#"${var%%[![:space:]]*}"}"
+    # remove trailing whitespace characters
+    var="${var%"${var##*[![:space:]]}"}"
+    echo -n "$var"
+}
+
+# Function to update values.yaml based on the OS
+update_values_yaml() {
+    local property=$1
+    local value=$2
+
+    if [[ "$OSTYPE" == "linux-gnu"* ]]; then
+        # Linux
+        sed -i "s|$property: .*|$property: \"$value\"|g" values.yaml
+    elif [[ "$OSTYPE" == "darwin"* ]]; then
+        # macOS
+        sed -i '' "s|$property: .*|$property: \"$value\"|g" values.yaml
+    else
+        echo "Unsupported OS: $OSTYPE"
+        return 1
+    fi
+}
+
+
+@test "add helm repo" {
+    helm repo add percona https://percona.github.io/percona-helm-charts/
+}
+
+@test "generate values.yaml" {
+    helm show values percona/pmm > values.yaml
+}
+
+@test "install/uninstall default chart and check connectivity" {
+    stop_port_forward
+    helm install pmm \
+        --set image.repository=$IMAGE_REPO \
+        --set image.tag=$IMAGE_TAG \
+        --wait \
+        percona/pmm
+
+    wait_for_pmm
+    start_port_forward
+
+    pmm_version=$(get_pmm_version)
+    echo "pmm_version is ${pmm_version}"
+
+    stop_port_forward
+    helm uninstall --wait --timeout 60s pmm
+    # maybe pmm uninstall has the ability to remove PVCs
+    # add validation that no load balancer, stateful set, or pods are left behind
+    delete_pvc
+}
+
+@test "install/uninstall with parameter set in cli" {
+    stop_port_forward
+    local instance_name="pmm1"
+    helm install $instance_name \
+        --set image.repository=$IMAGE_REPO \
+        --set image.tag=$IMAGE_TAG \
+        --set-string pmmEnv.PMM_ENABLE_ACCESS_CONTROL="1" \
+        --set service.type="NodePort" \
+        --wait \
+        percona/pmm
+    wait_for_pmm
+
+    start_port_forward
+
+    result=$(get_env_variable $instance_name "PMM_ENABLE_ACCESS_CONTROL")
+    trimmed_result=$(trim "$result")
+    assert_equal "$trimmed_result" "1"
+
+    pmm_version=$(get_pmm_version)
+    echo "pmm_version is ${pmm_version}"
+
+    stop_port_forward
+    # add check that pmm is working and env var was set
+
+    helm uninstall --wait --timeout 60s pmm1
+    delete_pvc
+}
+
+@test "install/uninstall chart with file" {
+    stop_port_forward
+    helm show values percona/pmm > values.yaml
+
+    update_values_yaml "tag" "$IMAGE_TAG"
+    update_values_yaml "repository" "$IMAGE_REPO"
+
+    helm install pmm2 -f values.yaml --wait percona/pmm
+    wait_for_pmm
+    start_port_forward
+
+    pmm_version=$(get_pmm_version)
+    echo "pmm_version is ${pmm_version}"
+
+    helm uninstall --wait --timeout 60s pmm2
+    delete_pvc
+}
+
+@test "install last released V3 version, upgrade to V3 and uninstall" {
+    stop_port_forward
+    helm show values percona/pmm > values.yaml
+
+    update_values_yaml "tag" "$RELEASE_TAG"
+    update_values_yaml "repository" "$RELEASE_REPO"
+
+    helm install pmm3 -f values.yaml --wait percona/pmm
+    wait_for_pmm
+    start_port_forward
+
+    pmm_version=$(get_pmm_version)
+    echo "pmm_version is ${pmm_version}"
+
+    update_values_yaml "tag" "$IMAGE_TAG"
+    update_values_yaml "repository" "$IMAGE_REPO"
+
+    helm upgrade pmm3 -f values.yaml --set podSecurityContext.runAsGroup=null --set podSecurityContext.fsGroup=null percona/pmm
+    sleep 7 # give a chance to update manifest
+    wait_for_pmm
+
+    pmm_version=$(get_pmm_version)
+
+    local new_ver=$(kubectl get pod --selector=app.kubernetes.io/name=pmm -o jsonpath="{.items[*].spec.containers[*].image}")
+
+    if [ "$new_ver" != "$IMAGE_REPO:$IMAGE_TAG" ]; then
+        echo "Unexpected version: $new_ver , should be '$IMAGE_REPO:$IMAGE_TAG'"
+        cat values.yaml
+        false
+    fi
+
+    stop_port_forward
+    helm uninstall --wait --timeout 60s pmm3
+    delete_pvc
+}
+
+@test "install last released V2 version, upgrade to V3 and uninstall" {
+    stop_port_forward
+    helm show values --version 1.3.0 percona/pmm > values.yaml
+
+    update_values_yaml "tag" "2.44.0"
+    update_values_yaml "repository" "percona/pmm-server"
+
+    helm install pmm4 --version 1.3.0 -f values.yaml --wait percona/pmm
+
+    wait_for_pmm
+    start_port_forward 443
+
+    admin_pass=$(get_pmm_pswd)
+    pmm_address=$(get_pmm_addr)
+
+    # encode pass, as it can have special characters
+    encoded_u_p=$(echo -n admin:${admin_pass} | base64)
+
+    echo "curl -k -H 'Authorization: Basic ...' https://"${pmm_address}"/v1/version"
+    # echo admin pass in case there are some issues with it
+    echo "pass:${admin_pass}"
+
+    run bash -c "curl -sk -H 'Authorization: Basic ${encoded_u_p}' https://${pmm_address}/v1/version | jq .version"
+    assert_success
+
+    # Check that the pmm_version string is not empty
+    if [[ -z "${output}" ]]; then
+        fail "pmm_version is empty"
+    fi
+
+    pmm_version=${output}
+    echo "pmm_version is ${pmm_version}"
+
+    stop_port_forward
+    start_port_forward
+
+    helm show values percona/pmm > values.yaml
+
+    update_values_yaml "tag" "$IMAGE_TAG"
+    update_values_yaml "repository" "$IMAGE_REPO"
+
+    kubectl exec pmm4-0 -- supervisorctl stop all
+    kubectl exec pmm4-0 -- chown -R pmm:pmm /srv
+
+    helm upgrade pmm4 -f values.yaml --set podSecurityContext.runAsGroup=null --set podSecurityContext.fsGroup=null percona/pmm
+    sleep 7 # give a chance to update manifest
+    wait_for_pmm
+
+    pmm_version=$(get_pmm_version)
+
+    local new_ver=$(kubectl get pod --selector=app.kubernetes.io/name=pmm -o jsonpath="{.items[*].spec.containers[*].image}")
+
+    if [ "$new_ver" != "$IMAGE_REPO:$IMAGE_TAG" ]; then
+        echo "Unexpected version: $new_ver , should be '$IMAGE_REPO:$IMAGE_TAG'"
+        cat values.yaml
+        false
+    fi
+
+    stop_port_forward
+    helm uninstall --wait --timeout 60s pmm4
+    delete_pvc
+}
+
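The connectivity test above carries a TODO about validating that no load balancer, stateful set, or pods survive helm uninstall. A minimal sketch of such a check, assuming the bats-assert helpers are loaded as elsewhere in the suite (the selector is the one the tests already use; the exact resource list is an assumption):

    # hypothetical post-uninstall assertion: nothing PMM-labelled is left behind
    run bash -c "kubectl get pods,statefulsets,services --selector=app.kubernetes.io/name=pmm -o name 2>/dev/null"
    assert_output ""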
https://"${pmm_address}"/v1/version" + # echo admin pass in case there are some issues with it + echo "pass:${admin_pass}" + + run bash -c "curl -sk -H 'Authorization: Basic ${encoded_u_p}' https://${pmm_address}/v1/version | jq .version" + assert_success + + # Check that the pmm_version string is not empty + if [[ -z "${output}" ]]; then + fail "pmm_version is empty" + fi + + pmm_version=${output} + echo "pmm_version is ${pmm_version}" + + stop_port_forward + start_port_forward + + helm show values percona/pmm > values.yaml + + update_values_yaml "tag" "$IMAGE_TAG" + update_values_yaml "repository" "$IMAGE_REPO" + + kubectl exec pmm4-0 -- supervisorctl stop all + kubectl exec pmm4-0 -- chown -R pmm:pmm /srv + + helm upgrade pmm4 -f values.yaml --set podSecurityContext.runAsGroup=null --set podSecurityContext.fsGroup=null percona/pmm + sleep 7 # give a chance to update manifest + wait_for_pmm + + pmm_version=$(get_pmm_version) + + local new_ver=$(kubectl get pod --selector=app.kubernetes.io/name=pmm -o jsonpath="{.items[*].spec.containers[*].image}") + + if [ "$new_ver" != "$IMAGE_REPO:$IMAGE_TAG" ]; then + echo "Unexpected version: $new_ver , should be '$IMAGE_REPO:$IMAGE_TAG'" + cat values.yaml + false + fi + + stop_port_forward + helm uninstall --wait --timeout 60s pmm4 + delete_pvc +} + diff --git a/k8s/k8s_helper.sh b/k8s/k8s_helper.sh new file mode 100644 index 00000000..b73288c0 --- /dev/null +++ b/k8s/k8s_helper.sh @@ -0,0 +1,44 @@ +wait_for_pmm(){ + sleep 5 + kubectl wait --for=condition=Ready --selector=app.kubernetes.io/name=pmm pod --timeout=5m +} + +delete_pvc(){ + kubectl delete pvc --selector=app.kubernetes.io/name=pmm + kubectl wait --for=delete --selector=app.kubernetes.io/name=pmm pvc --timeout=5m +} + +get_pmm_pswd(){ + kubectl get secret pmm-secret -o jsonpath='{.data.PMM_ADMIN_PASSWORD}' | base64 --decode +} + +# Function to start port forwarding +start_port_forward(){ + local inner_port=${1:-8443} # Set to first argument or default to 8443 + POD_NAME=$(kubectl get pods -n default -l app.kubernetes.io/name=pmm -o jsonpath='{.items[0].metadata.name}') + + kubectl port-forward "$POD_NAME" 8443:"${inner_port}" -n default & + PORT_FORWARD_PID=$! + + echo $PORT_FORWARD_PID > port_forward.pid + sleep 5 # Give port forwarding some time to set up +} + + +# Function to stop port forwarding +stop_port_forward(){ + if [ -f port_forward.pid ]; then + PORT_FORWARD_PID=$(cat port_forward.pid) + kill $PORT_FORWARD_PID || true + rm port_forward.pid + fi +} + +# Retrieves the value of a specified environment variable from the first pod of a given Kubernetes instance. 
diff --git a/k8s/pmm_helper.sh b/k8s/pmm_helper.sh
new file mode 100644
index 00000000..824ec981
--- /dev/null
+++ b/k8s/pmm_helper.sh
@@ -0,0 +1,35 @@
+
+load "./lib/bats-support/load" # Load BATS support libraries
+load "./lib/bats-assert/load"  # Load BATS assertions
+
+get_pmm_addr(){
+    local node_port=8443
+    local node_ip=127.0.0.1
+    echo $node_ip:$node_port
+}
+
+get_pmm_version() {
+    # Depending on the storage driver, a local PVC may not be cleaned up,
+    # and the admin password is only generated on the first PVC init,
+    # so always use a new release name (pmmX) when helm install should provision a fresh PVC.
+
+    admin_pass=$(get_pmm_pswd)
+    pmm_address=$(get_pmm_addr)
+
+    # encode pass, as it can have special characters
+    encoded_u_p=$(echo -n admin:${admin_pass} | base64)
+
+    echo "curl -k -H 'Authorization: Basic ...' https://"${pmm_address}"/v1/version"
+    # echo admin pass in case there are some issues with it
+    echo "pass:${admin_pass}"
+
+    run bash -c "curl -sk -H 'Authorization: Basic ${encoded_u_p}' https://${pmm_address}/v1/version | jq .version"
+    assert_success
+
+    # Check that the pmm_version string is not empty
+    if [[ -z "${output}" ]]; then
+        fail "pmm_version is empty"
+    fi
+
+    echo $output
+}
diff --git a/k8s/setup_bats_libs.sh b/k8s/setup_bats_libs.sh
new file mode 100755
index 00000000..9a9fa1d7
--- /dev/null
+++ b/k8s/setup_bats_libs.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Define the directory where the libraries will be cloned
+LIBS_DIR="lib"
+
+# Create the directory if it doesn't exist
+mkdir -p $LIBS_DIR
+
+# Clone bats-support
+if [ ! -d "$LIBS_DIR/bats-support" ]; then
+    git clone https://github.com/bats-core/bats-support.git $LIBS_DIR/bats-support
+else
+    echo "bats-support is already present."
+fi
+
+# Clone bats-assert
+if [ ! -d "$LIBS_DIR/bats-assert" ]; then
+    git clone https://github.com/bats-core/bats-assert.git $LIBS_DIR/bats-assert
+else
+    echo "bats-assert is already present."
+fi
+
+echo "BATS libraries setup completed"
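For reference, the same sequence the workflow performs can be reproduced locally; a sketch assuming minikube has been prepared as in the comment block at the top of helm-test.bats and that the default dev images are wanted:

    cd k8s
    ./setup_bats_libs.sh                 # clones bats-support and bats-assert into ./lib
    export BATS_LIB_PATH="$(pwd)/lib"
    export IMAGE_REPO=perconalab/pmm-server
    export IMAGE_TAG=3-dev-latest
    bats --tap helm-test.bats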