diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 93d35d7..354c648 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -171,6 +171,54 @@ jobs:
           sleep 5
           echo "Knative test successful!"
 
+      - name: "Run nydus host-share test"
+        run: |
+          # Change the snapshotter mode
+          ./bin/inv_wrapper.sh nydus-snapshotter.set-mode host-share
+
+          export SC2_RUNTIME_CLASS=qemu-${{ matrix.tee }}-sc2
+          export POD_LABEL="apps.sc2.io/name=helloworld-py"
+
+          # ----- Python Test -----
+
+          echo "Running python test..."
+          envsubst < ./demo-apps/helloworld-py-nydus/deployment.yaml | ./bin/kubectl apply -f -
+
+          # Wait for the pod to be ready
+          until [ "$(./bin/kubectl get pods -l ${POD_LABEL} -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}')" = "True" ]; do echo "Waiting for pod to be ready..."; sleep 2; done
+          sleep 1
+
+          # Get the service's cluster IP
+          service_ip=$(./bin/kubectl get services -o jsonpath='{.items[?(@.metadata.name=="coco-helloworld-py-node-port")].spec.clusterIP}')
+          [ "$(curl --retry 3 -X GET ${service_ip}:8080)" = "Hello World!" ]
+          envsubst < ./demo-apps/helloworld-py-nydus/deployment.yaml | ./bin/kubectl delete -f -
+
+          # Wait for the pod to be deleted
+          ./bin/kubectl wait --for=delete -l ${POD_LABEL} pod --timeout=30s
+
+          # Extra cautionary sleep
+          sleep 5
+          echo "Python test successful!"
+
+          # ----- Knative Test -----
+          envsubst < ./demo-apps/helloworld-knative-nydus/service.yaml | ./bin/kubectl apply -f -
+          sleep 1
+
+          # Get the service URL
+          service_url=$(./bin/kubectl get ksvc helloworld-knative --output=custom-columns=URL:.status.url --no-headers)
+          [ "$(curl --retry 3 ${service_url})" = "Hello World!" ]
+
+          # Delete the service and wait for the pod to be deleted
+          envsubst < ./demo-apps/helloworld-knative-nydus/service.yaml | ./bin/kubectl delete -f -
+          ./bin/kubectl wait --for=delete -l ${POD_LABEL} pod --timeout=60s
+
+          # Extra cautionary sleep
+          sleep 5
+          echo "Knative test successful!"
+
+          # Change the snapshotter mode back again
+          ./bin/inv_wrapper.sh nydus-snapshotter.set-mode guest-pull
+
       - name: "Enable default-memory annotation"
         run: |
           for runtime_class in ${{ matrix.runtime_classes }}; do
@@ -182,8 +230,7 @@
 
           # After changing the annotation of the qemu-snp-sc2 runtime class we
           # need to restart the VM cache
-          sudo -E ./vm-cache/target/release/vm-cache stop
-          sudo -E ./vm-cache/target/release/vm-cache background
+          sudo -E ./vm-cache/target/release/vm-cache restart
 
       - name: "Run knative chaining demo"
         run: |
diff --git a/README.md b/README.md
index 630a1b4..6bdd7f8 100644
--- a/README.md
+++ b/README.md
@@ -81,6 +81,7 @@ For further documentation, you may want to check these other documents:
 * [CoCo Upgrade](./docs/upgrade_coco.md) - upgrade the current CoCo version.
 * [Guest Components](./docs/guest_components.md) - instructions to patch components inside SC2 guests.
 * [Host Kernel](./docs/host_kernel.md) - bump the kernel version in the host.
+* [Image Pull](./docs/image_pull.md) - details on the image-pulling mechanisms supported in SC2.
 * [K8s](./docs/k8s.md) - documentation about configuring a single-node Kubernetes cluster.
 * [Kata](./docs/kata.md) - instructions to build our custom Kata fork and `initrd` images.
 * [Key Broker Service](./docs/kbs.md) - docs on using and patching the KBS.
diff --git a/docs/image_pull.md b/docs/image_pull.md
new file mode 100644
index 0000000..e31b4f0
--- /dev/null
+++ b/docs/image_pull.md
@@ -0,0 +1,54 @@
+## Image Pull
+
+This document describes the different mechanisms to get a container image
+inside a cVM in SC2. We _always_ assume that the integrity of container images
+must be validated. We also consider the situation in which their confidentiality
+must be preserved.
+
+### Guest Pull
+
+The guest pull mechanism always pulls the container image inside the guest cVM.
+This is the default mechanism in CoCo, as it enables the most secure and
+simplest deployment: users sign (and encrypt) container images locally, upload
+them to a container registry, and pull and decrypt them inside the cVM.
+
+Albeit secure, this mechanism has high performance overheads, as the image must
+be pulled every single time, precluding any caching benefits.
+
+To mitigate the performance overheads, we can convert the OCI image to a
+Nydus image, which supports lazy loading of container data.
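+
+As a reference, the conversion can be done with the `nydusify` CLI that SC2
+installs on the host. This is only a sketch: the image names below are
+placeholders, and the exact flags may vary across `nydusify` versions:
+
+```bash
+# Convert an OCI image and push the converted copy to the same registry
+nydusify convert \
+    --source registry.example.com/helloworld-py:latest \
+    --target registry.example.com/helloworld-py:latest-nydus
+```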
+
+### Host Share
+
+The host share mechanism mounts a container image from the host into the guest.
+Given that the host is untrusted, this mechanism only works for images that
+do not have confidentiality requirements. To maintain integrity, we mount
+the image with `dm-verity`, and validate the `dm-verity` device as part of
+attestation.
+
+We choose to mount individual layers separately (rather than whole images),
+but we should measure whether the former is actually better than the latter.
+
+We could mount encrypted images from the host into the guest, but we would
+lose the de-duplication opportunities on the host.
+
+### Usage
+
+Each image pull mechanism is implemented as a different remote snapshotter
+in containerd, all of them based on the
+[nydus-snapshotter](https://github.com/containerd/nydus-snapshotter/) plus
+our modifications.
+
+To switch between image-pulling mechanisms, you only need to change the
+snapshotter mode:
+
+```bash
+inv nydus-snapshotter.set-mode [guest-pull,host-share]
+```
+
+If you see any snapshotter-related issues (either in the `containerd` or the
+`nydus-snapshotter` journal logs), you can purge the snapshotters:
+
+```bash
+inv nydus-snapshotter.purge
+```
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index d1b4aa5..a6cca89 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -151,14 +151,27 @@ ctr -n k8s.io content fetch ${IMAGE_NAME}
 the image name is the image tag appearing right before the error message in
 the pod logs.
 
-### Nydus snapshot corruption
+### Rootfs Mount Issue
 
-Sometimes, after hot-replacing the nydus-snapshotter, snapshots become corrupted,
-and we can see the error below.
+Sometimes, if we are mixing and matching different snapshotters, we may run
+into the following error:
 
 ```
 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to create containerd task: failed to create shim task: failed to mount /run/kata-containers/shared/containers/0a583f0691d78e2036425f99bdac8e03302158320c1c55a5c6482cae7e729009/rootfs to /run/kata-containers/0a583f0691d78e2036425f99bdac8e03302158320c1c55a5c6482cae7e729009/rootfs, with error: ENOENT: No such file or directory
 ```
 
-The only solution I found was to bump to a more up-to-date version of nydus.
-This seemed to fix the issue.
+This happens because the pause image bundle has not been unpacked correctly.
+Note that the pause image bundle is unpacked into the `/run/kata-containers/shared`
+directory, and then mounted into the `/run/kata-containers/` one.
+
+This usually happens when containerd believes that we already have the pause
+image, and hence skips the pull. Skipping the pull prevents the snapshotter
+from generating the respective Kata virtual volumes.
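+
+To confirm that this is the case, you can check whether containerd already
+holds the pause image in its image store (the `grep` pattern below is just
+an example):
+
+```bash
+sudo crictl images | grep pause
+```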
+
+As a rule of thumb, a good fix is to remove all images involved in the app
+from the content store, and purge the snapshotter caches:
+
+```bash
+sudo crictl rmi <image-id>
+inv nydus-snapshotter.purge
+```
diff --git a/tasks/containerd.py b/tasks/containerd.py
index 1abf3ee..c12ab5d 100644
--- a/tasks/containerd.py
+++ b/tasks/containerd.py
@@ -31,23 +31,24 @@ CONTAINERD_HOST_BINPATH = "/usr/bin"
 
-def do_build(debug=False):
-    docker_cmd = "docker build -t {} -f {} .".format(
+def do_build(nocache=False):
+    docker_cmd = "docker build{} -t {} -f {} .".format(
+        " --no-cache" if nocache else "",
         CONTAINERD_IMAGE_TAG,
         join(PROJ_ROOT, "docker", "containerd.dockerfile"),
     )
 
-    result = run(docker_cmd, shell=True, capture_output=True, cwd=PROJ_ROOT)
-    assert result.returncode == 0, print(result.stderr.decode("utf-8").strip())
-    if debug:
-        print(result.stdout.decode("utf-8").strip())
+    run(docker_cmd, shell=True, check=True, cwd=PROJ_ROOT)
 
 
 @task
-def build(ctx):
+def build(ctx, nocache=False, push=False):
     """
     Build the containerd fork for CoCo
     """
-    do_build(debug=True)
+    do_build(nocache=nocache)
+
+    if push:
+        run(f"docker push {CONTAINERD_IMAGE_TAG}", shell=True, check=True)
 
 
 @task
@@ -73,19 +74,23 @@ def cli(ctx, mount_path=join(PROJ_ROOT, "..", "containerd")):
 
 
 @task
-def set_log_level(ctx, log_level):
+def stop(ctx):
     """
-    Set containerd's log level, must be one in: info, debug
+    Stop the containerd work-on container
     """
-    allowed_log_levels = ["info", "debug"]
-    if log_level not in allowed_log_levels:
-        print(
-            "Unsupported log level '{}'. Must be one in: {}".format(
-                log_level, allowed_log_levels
-            )
-        )
-        return
+    result = run(
+        "docker rm -f {}".format(CONTAINERD_CTR_NAME),
+        shell=True,
+        check=True,
+        capture_output=True,
+    )
+    assert result.returncode == 0
+
+
+def set_log_level(log_level):
+    """
+    Set containerd's log level, must be one in: info, debug
+    """
     updated_toml_str = """
     [debug]
     level = "{log_level}"
diff --git a/tasks/kata.py b/tasks/kata.py
index 1128fb6..3548e45 100644
--- a/tasks/kata.py
+++ b/tasks/kata.py
@@ -67,20 +67,10 @@ def stop(ctx):
     stop_kata_workon_ctr()
 
 
-@task
-def set_log_level(ctx, log_level):
+def set_log_level(log_level):
     """
     Set kata's log level, must be one in: info, debug
     """
-    allowed_log_levels = ["info", "debug"]
-    if log_level not in allowed_log_levels:
-        print(
-            "Unsupported log level '{}'. Must be one in: {}".format(
-                log_level, allowed_log_levels
-            )
-        )
-        return
-
     enable_debug = str(log_level == "debug").lower()
 
     for runtime in KATA_RUNTIMES + SC2_RUNTIMES:
@@ -146,6 +136,7 @@ def hot_replace_shim(ctx, runtime="qemu-snp-sc2"):
             ),
         ),
         sc2=runtime in SC2_RUNTIMES,
+        hot_replace=True,
     )
 
     restart_containerd()
diff --git a/tasks/kernel.py b/tasks/kernel.py
index d8af798..9b07acb 100644
--- a/tasks/kernel.py
+++ b/tasks/kernel.py
@@ -54,8 +54,10 @@ def build_guest(debug=False, hot_replace=False):
         ctr_path, host_path, sudo=False, debug=debug, hot_replace=hot_replace
     )
 
+    # The -V option enables dm-verity support in the guest (technically only
+    # needed for SC2)
     build_kernel_base_cmd = [
-        f"./build-kernel.sh -x -f -v {GUEST_KERNEL_VERSION}",
+        f"./build-kernel.sh -x -V -f -v {GUEST_KERNEL_VERSION}",
        "-u 'https://cdn.kernel.org/pub/linux/kernel/v{}.x/'".format(
            GUEST_KERNEL_VERSION.split(".")[0]
        ),
@@ -117,4 +119,7 @@ def build_guest(debug=False, hot_replace=False):
 
 @task
 def hot_replace_guest(ctx, debug=False):
+    """
+    Hot-replace the guest kernel
+    """
     build_guest(debug=debug, hot_replace=True)
diff --git a/tasks/nydus.py b/tasks/nydus.py
index 63d7064..a8aef3c 100644
--- a/tasks/nydus.py
+++ b/tasks/nydus.py
@@ -2,7 +2,7 @@ from os.path import join
 from subprocess import run
 
 from tasks.util.docker import copy_from_ctr_image
-from tasks.util.env import GHCR_URL, GITHUB_ORG, PROJ_ROOT, print_dotted_line
+from tasks.util.env import COCO_ROOT, GHCR_URL, GITHUB_ORG, PROJ_ROOT, print_dotted_line
 from tasks.util.nydus import NYDUSIFY_PATH
 from tasks.util.versions import NYDUS_VERSION
 
@@ -27,12 +27,20 @@ def build(ctx, nocache=False, push=False):
 
 
 def do_install():
-    print_dotted_line(f"Installing nydusify (v{NYDUS_VERSION})")
+    print_dotted_line(f"Installing nydus image services (v{NYDUS_VERSION})")
 
+    # Non root-owned binaries
     ctr_bin = ["/go/src/github.com/sc2-sys/nydus/contrib/nydusify/cmd/nydusify"]
     host_bin = [NYDUSIFY_PATH]
     copy_from_ctr_image(NYDUS_IMAGE_TAG, ctr_bin, host_bin, requires_sudo=False)
 
+    # Root-owned binaries
+    # The host-pull functionality requires nydus-image >= 2.3.0, but the one
+    # installed with the daemon is 2.2.4
+    ctr_bin = ["/go/src/github.com/sc2-sys/nydus/target/release/nydus-image"]
+    host_bin = [join(COCO_ROOT, "bin", "nydus-image")]
+    copy_from_ctr_image(NYDUS_IMAGE_TAG, ctr_bin, host_bin, requires_sudo=True)
+
     print("Success!")
diff --git a/tasks/nydus_snapshotter.py b/tasks/nydus_snapshotter.py
index d5fbf64..7d234bd 100644
--- a/tasks/nydus_snapshotter.py
+++ b/tasks/nydus_snapshotter.py
@@ -1,14 +1,38 @@
 from invoke import task
+from json import loads as json_loads
-from os.path import join
+from os.path import exists, join
 from subprocess import run
 from tasks.util.docker import copy_from_ctr_image, is_ctr_running
-from tasks.util.env import COCO_ROOT, GHCR_URL, GITHUB_ORG, PROJ_ROOT, print_dotted_line
-from tasks.util.toml import update_toml
+from tasks.util.env import (
+    COCO_ROOT,
+    CONTAINERD_CONFIG_FILE,
+    CONTAINERD_CONFIG_ROOT,
+    GHCR_URL,
+    GITHUB_ORG,
+    KATA_RUNTIMES,
+    LOCAL_REGISTRY_URL,
+    PROJ_ROOT,
+    SC2_RUNTIMES,
+    print_dotted_line,
+)
+from tasks.util.toml import read_value_from_toml, update_toml
 from tasks.util.versions import NYDUS_SNAPSHOTTER_VERSION
 
-NYDUS_SNAPSHOTTER_CONFIG_FILE = join(
-    COCO_ROOT, "share", "nydus-snapshotter", "config-coco-guest-pulling.toml"
+NYDUS_SNAPSHOTTER_GUEST_PULL_NAME = "nydus"
+NYDUS_SNAPSHOTTER_HOST_SHARE_NAME = "nydus-hs"
+
+NYDUS_SNAPSHOTTER_CONFIG_DIR = join(COCO_ROOT, "share", "nydus-snapshotter")
+NYDUS_SNAPSHOTTER_GUEST_PULL_CONFIG = join(
+    NYDUS_SNAPSHOTTER_CONFIG_DIR, "config-coco-guest-pulling.toml"
+)
+NYDUS_SNAPSHOTTER_HOST_SHARE_CONFIG = join(
+    NYDUS_SNAPSHOTTER_CONFIG_DIR, "config-coco-host-sharing.toml"
 )
+
+NYDUS_SNAPSHOTTER_CONFIG_FILES = [
+    NYDUS_SNAPSHOTTER_GUEST_PULL_CONFIG,
+    NYDUS_SNAPSHOTTER_HOST_SHARE_CONFIG,
+]
 NYDUS_SNAPSHOTTER_CTR_NAME = "nydus-snapshotter-workon"
 NYDUS_SNAPSHOTTER_IMAGE_TAG = (
     join(GHCR_URL, GITHUB_ORG, "nydus-snapshotter") + f":{NYDUS_SNAPSHOTTER_VERSION}"
 )
@@ -29,11 +53,55 @@ def restart_nydus_snapshotter():
     run("sudo service nydus-snapshotter restart", shell=True, check=True)
 
 
+def do_purge():
+    """
+    Purging the snapshotters for a fresh start is a two-step process. First,
+    we need to remove all nydus metadata, which we can do by just bluntly
+    removing `/var/lib/containerd-nydus-*`. Second, we need to reset a map
+    that we keep in containerd's image store of which images we have pulled
+    with which snapshotters. This is, essentially, what we see when we run
+    `sudo crictl images`. There is no easy way to clear just this map, so
+    instead we remove all the images that we may have used.
+    """
+
+    # Clear nydus-snapshots
+    for snap in [NYDUS_SNAPSHOTTER_HOST_SHARE_NAME, NYDUS_SNAPSHOTTER_GUEST_PULL_NAME]:
+        run(f"sudo rm -rf /var/lib/containerd-{snap}", shell=True, check=True)
+
+    # Clear all possibly used images (only images in our registry, or the
+    # pause container images)
+    cmd = (
+        "sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock"
+        " images -o json"
+    )
+    rm_cmd = "sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock rmi"
+    data = json_loads(run(cmd, shell=True, capture_output=True).stdout.decode("utf-8"))
+    for image_data in data["images"]:
+        if any([tag.startswith(LOCAL_REGISTRY_URL) for tag in image_data["repoTags"]]):
+            run("{} {}".format(rm_cmd, image_data["id"]), shell=True, check=True)
+
+        if any(
+            [tag.startswith("registry.k8s.io/pause") for tag in image_data["repoTags"]]
+        ):
+            run("{} {}".format(rm_cmd, image_data["id"]), shell=True, check=True)
+
+    restart_nydus_snapshotter()
+
+
+@task
+def purge(ctx):
+    """
+    Remove all cached snapshots and pulled images for a fresh start
+    """
+    do_purge()
+
+
 def install(debug=False, clean=False):
     """
     Install the nydus snapshotter binaries
     """
-    print_dotted_line(f"Installing nydus-snapshotter (v{NYDUS_SNAPSHOTTER_VERSION})")
+    print_dotted_line(f"Installing nydus-snapshotter(s) (v{NYDUS_SNAPSHOTTER_VERSION})")
 
     host_binaries = [
         join(NYDUS_SNAPSHOTTER_HOST_BINPATH, binary)
@@ -47,9 +115,86 @@ def install(debug=False, clean=False):
         NYDUS_SNAPSHOTTER_IMAGE_TAG, ctr_binaries, host_binaries, requires_sudo=True
     )
 
+    # We install nydus with host-sharing as a "different" snapshotter
+    imports = read_value_from_toml(CONTAINERD_CONFIG_FILE, "imports")
+    host_share_import_path = join(
+        CONTAINERD_CONFIG_ROOT,
+        "config.toml.d",
+        f"{NYDUS_SNAPSHOTTER_HOST_SHARE_NAME}-snapshotter.toml",
+    )
+    if host_share_import_path not in imports:
+        config_file = """
+[proxy_plugins]
+  [proxy_plugins.{}]
+    type = "snapshot"
+    address = "/run/containerd-nydus/containerd-nydus-grpc.sock"
+""".format(
+            NYDUS_SNAPSHOTTER_HOST_SHARE_NAME
+        )
+
+        cmd = """
+sudo sh -c 'cat <<EOF > {destination_file}
+{file_contents}
+EOF'
+""".format(
+            destination_file=host_share_import_path,
+            file_contents=config_file,
+        )
+
+        run(cmd, shell=True, check=True)
+
+        imports += [host_share_import_path]
+        updated_toml_str = """
+        imports = [ {sn} ]
+        """.format(
+            sn=",".join([f'"{s}"' for s in imports])
+        )
+        update_toml(CONTAINERD_CONFIG_FILE, updated_toml_str)
+
+    if not exists(NYDUS_SNAPSHOTTER_HOST_SHARE_CONFIG):
+        host_sharing_config = """
+version = 1
+root = "/var/lib/containerd-{nydus_hs_name}"
+address = "/run/containerd-nydus/containerd-nydus-grpc.sock"
+daemon_mode = "none"
+
+[system]
+enable = true
+address = "/run/containerd-nydus/system.sock"
+
+[daemon]
+fs_driver = "blockdev"
+nydusimage_path = "{nydus_image_path}"
+
+[remote]
+skip_ssl_verify = true
+
+[snapshot]
+enable_kata_volume = true
+
+[experimental.tarfs]
+enable_tarfs = true
+mount_tarfs_on_host = false
+export_mode = "layer_block_with_verity"
+""".format(
+            nydus_hs_name=NYDUS_SNAPSHOTTER_HOST_SHARE_NAME,
+            nydus_image_path=join(COCO_ROOT, "bin", "nydus-image"),
+        )
+
+        cmd = """
+sudo sh -c 'cat <<EOF > {destination_file}
+{file_contents}
+EOF'
+""".format(
+            destination_file=NYDUS_SNAPSHOTTER_HOST_SHARE_CONFIG,
+            file_contents=host_sharing_config,
+        )
+
+        run(cmd, shell=True, check=True)
+
     # Remove all nydus config for a clean start
     if clean:
-        run("sudo rm -rf /var/lib/containerd-nydus", shell=True, check=True)
+        do_purge()
 
     # Restart the nydus service
     restart_nydus_snapshotter()
@@ -73,27 +218,18 @@ def build(ctx, nocache=False, push=False):
         run(f"docker push {NYDUS_SNAPSHOTTER_IMAGE_TAG}", shell=True, check=True)
 
 
-@task
-def set_log_level(ctx, log_level):
+def set_log_level(log_level):
     """
     Set the log level for the nydus snapshotter
     """
-    allowed_log_levels = ["info", "debug"]
-    if log_level not in allowed_log_levels:
-        print(
-            "Unsupported log level '{}'. Must be one in: {}".format(
-                log_level, allowed_log_levels
-            )
+    for config_file in NYDUS_SNAPSHOTTER_CONFIG_FILES:
+        updated_toml_str = """
+        [log]
+        level = "{log_level}"
+        """.format(
+            log_level=log_level
         )
-        return
-
-    updated_toml_str = """
-    [log]
-    level = "{log_level}"
-    """.format(
-        log_level=log_level
-    )
-    update_toml(NYDUS_SNAPSHOTTER_CONFIG_FILE, updated_toml_str)
+        update_toml(config_file, updated_toml_str)
 
     restart_nydus_snapshotter()
 
@@ -124,6 +260,20 @@ def cli(ctx, mount_path=join(PROJ_ROOT, "..", "nydus-snapshotter")):
     )
 
 
+@task
+def stop(ctx):
+    """
+    Stop the nydus-snapshotter work-on container
+    """
+    result = run(
+        "docker rm -f {}".format(NYDUS_SNAPSHOTTER_CTR_NAME),
+        shell=True,
+        check=True,
+        capture_output=True,
+    )
+    assert result.returncode == 0
+
+
 @task
 def hot_replace(ctx):
     """
@@ -148,3 +298,71 @@ def hot_replace(ctx):
 
     run(docker_cmd, shell=True, check=True)
     restart_nydus_snapshotter()
+
+
+@task
+def set_mode(ctx, mode):
+    """
+    Set the nydus-snapshotter operation mode: 'guest-pull' or 'host-share'
+    """
+    if mode not in ["guest-pull", "host-share"]:
+        print(f"ERROR: unrecognised nydus-snapshotter mode: {mode}")
+        print("ERROR: mode must be one in: ['guest-pull', 'host-share']")
+        return
+
+    config_file = (
+        NYDUS_SNAPSHOTTER_HOST_SHARE_CONFIG
+        if mode == "host-share"
+        else NYDUS_SNAPSHOTTER_GUEST_PULL_CONFIG
+    )
+    exec_start = (
+        f"{NYDUS_SNAPSHOTTER_HOST_BINPATH}/containerd-nydus-grpc "
+        f"--config {config_file} --log-to-stdout"
+    )
+
+    service_config = """
+[Unit]
+Description=Nydus snapshotter
+After=network.target local-fs.target
+Before=containerd.service
+
+[Service]
+ExecStart={}
+
+[Install]
+RequiredBy=containerd.service
+""".format(
+        exec_start
+    )
+
+    service_path = "/etc/systemd/system/nydus-snapshotter.service"
+    cmd = """
+sudo sh -c 'cat <<EOF > {destination_file}
+{file_contents}
+EOF'
+""".format(
+        destination_file=service_path,
+        file_contents=service_config,
+    )
+    run(cmd, shell=True, check=True)
+
+    # Update all runtime configurations to use the right snapshotter. We
+    # _always_ avoid having both snapshotters co-exist
+    snap_name = (
+        NYDUS_SNAPSHOTTER_HOST_SHARE_NAME
+        if mode == "host-share"
+        else NYDUS_SNAPSHOTTER_GUEST_PULL_NAME
+    )
+    for runtime in KATA_RUNTIMES + SC2_RUNTIMES:
+        updated_toml_str = """
+        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata-{runtime_name}]
+        snapshotter = "{snapshotter_name}"
+        """.format(
+            runtime_name=runtime, snapshotter_name=snap_name
+        )
+        update_toml(CONTAINERD_CONFIG_FILE, updated_toml_str)
+
+    # Reload systemd to apply the new service configuration
+    run("sudo systemctl daemon-reload", shell=True, check=True)
+
+    restart_nydus_snapshotter()
diff --git a/tasks/sc2.py b/tasks/sc2.py
index 3fbe3b3..56fa4be 100644
--- a/tasks/sc2.py
+++ b/tasks/sc2.py
@@ -2,16 +2,23 @@ from os import environ, makedirs
 from os.path import exists, join
 from subprocess import run
 
-from tasks.containerd import install as containerd_install
+from tasks.containerd import (
+    install as containerd_install,
+    set_log_level as containerd_set_log_level,
+)
 from tasks.demo_apps import (
     do_push_to_local_registry as push_demo_apps_to_local_registry,
 )
 from tasks.k8s import install as k8s_tooling_install
 from tasks.k9s import install as k9s_install
+from tasks.kata import set_log_level as kata_set_log_level
 from tasks.kernel import build_guest as build_guest_kernel
 from tasks.knative import install as knative_install
 from tasks.kubeadm import create as k8s_create, destroy as k8s_destroy
-from tasks.nydus_snapshotter import install as nydus_snapshotter_install
+from tasks.nydus_snapshotter import (
+    install as nydus_snapshotter_install,
+    set_log_level as nydus_snapshotter_set_log_level,
+)
 from tasks.nydus import do_install as nydus_install
 from tasks.operator import (
     install as operator_install,
@@ -353,3 +360,22 @@ def destroy(ctx, debug=False):
     assert result.returncode == 0, print(result.stderr.decode("utf-8").strip())
     if debug:
         print(result.stdout.decode("utf-8").strip())
+
+
+@task
+def set_log_level(ctx, log_level):
+    """
+    Set the log level for all SC2 components: containerd, kata, and nydus-snapshotter
+    """
+    allowed_log_levels = ["info", "debug"]
+    if log_level not in allowed_log_levels:
+        print(
+            "Unsupported log level '{}'. Must be one in: {}".format(
+                log_level, allowed_log_levels
+            )
+        )
+        return
+
+    containerd_set_log_level(log_level)
+    kata_set_log_level(log_level)
+    nydus_snapshotter_set_log_level(log_level)