fix: add debug container to CNS Pod for tests
Signed-off-by: GitHub <noreply@github.com>
rbtr authored Jun 14, 2024
1 parent 40bb4f2 commit c2d9d2f
Showing 11 changed files with 220 additions and 60 deletions.
@@ -205,7 +205,7 @@ stages:
cd hack/scripts
chmod +x async-delete-test.sh
./async-delete-test.sh
-if ! [ -z $(kubectl -n kube-system get ds azure-cns | grep non-existing) ]; then
+if ! [ -z $(kubectl -n kube-system get ds azure-cns | grep non-existing) ]; then
kubectl -n kube-system patch daemonset azure-cns --type json -p='[{"op": "remove", "path": "/spec/template/spec/nodeSelector/non-existing"}]'
fi
name: "testAsyncDelete"
4 changes: 1 addition & 3 deletions cns/linux.Dockerfile
@@ -21,9 +21,7 @@ RUN tdnf install -y iptables

FROM mariner-distroless
COPY --from=iptables /usr/sbin/*tables* /usr/sbin/
-COPY --from=iptables /usr/lib/iptables /usr/lib/iptables
-COPY --from=iptables /usr/lib/libip* /usr/lib/
-COPY --from=iptables /usr/lib/libxtables* /usr/lib/
+COPY --from=iptables /usr/lib /usr/lib
COPY --from=builder /usr/local/bin/azure-cns \
/usr/local/bin/azure-cns
ENTRYPOINT [ "/usr/local/bin/azure-cns" ]
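Note: the runtime stage is mariner-distroless, so the final image ships no shell or coreutils. This change collapses the three per-library COPY globs (the iptables directory, libip*, libxtables*) into a single copy of the iptables stage's entire /usr/lib, so iptables' shared-library dependencies no longer have to be enumerated one by one.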
6 changes: 3 additions & 3 deletions hack/scripts/async-delete-test.sh
@@ -24,11 +24,11 @@ do

echo "check directory for pending delete"
cns_pod=$(kubectl get pods -l k8s-app=azure-cns -n kube-system -o wide | grep "$node_name" | awk '{print $1}')
-file=$(kubectl exec -it $cns_pod -n kube-system -- ls var/run/azure-vnet/deleteIDs)
+file=$(kubectl exec -it $cns_pod -c debug -n kube-system -- ls var/run/azure-vnet/deleteIDs)
if [ -z $file ]; then
while [ -z $file ];
do
-file=$(kubectl exec -i $cns_pod -n kube-system -- ls var/run/azure-vnet/deleteIDs)
+file=$(kubectl exec -i $cns_pod -c debug -n kube-system -- ls var/run/azure-vnet/deleteIDs)
done
fi
echo "pending deletes"
@@ -37,7 +37,7 @@ do
echo "wait 30s for filesystem delete to occur"
sleep 30s
echo "check directory is now empty"
-check_directory=$(kubectl exec -i $cns_pod -n kube-system -- ls var/run/azure-vnet/deleteIDs)
+check_directory=$(kubectl exec -i $cns_pod -c debug -n kube-system -- ls var/run/azure-vnet/deleteIDs)
if [ -z $check_directory ]; then
echo "async delete success"
break
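All three exec calls above now pass -c debug: the azure-cns container is built from the distroless image (see the Dockerfile change above), so there is no shell or ls to exec into; the script targets the debug sidecar added in the test manifest below instead. A minimal Go sketch of the same polling loop, using the ExecCmdOnPod signature this commit introduces in test/internal/kubernetes/utils.go — the function name and pod-resolution wiring are assumed, not part of the commit (imports assumed: bytes, context, github.com/pkg/errors, client-go's kubernetes and rest packages):

// Sketch only: poll the CNS pod's debug sidecar until a pending-delete file
// appears, mirroring the shell loop above.
func waitForPendingDelete(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, cnsPodName string) ([]byte, error) {
	for {
		if err := ctx.Err(); err != nil {
			return nil, errors.Wrap(err, "gave up waiting for pending deletes")
		}
		out, err := ExecCmdOnPod(ctx, clientset, "kube-system", cnsPodName, "debug",
			[]string{"ls", "/var/run/azure-vnet/deleteIDs"}, config)
		if err != nil {
			return nil, errors.Wrap(err, "failed to exec into debug container")
		}
		if len(bytes.TrimSpace(out)) > 0 {
			return out, nil // at least one pending-delete file exists
		}
	}
}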
27 changes: 27 additions & 0 deletions test/integration/manifests/cns/daemonset-linux.yaml
@@ -83,6 +83,33 @@ spec:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
+      - name: debug
+        image: mcr.microsoft.com/cbl-mariner/base/core:2.0
+        imagePullPolicy: IfNotPresent
+        command: ["sleep", "3600"]
+        securityContext:
+          capabilities:
+            add:
+              - NET_ADMIN
+        volumeMounts:
+          - name: log
+            mountPath: /var/log
+          - name: cns-state
+            mountPath: /var/lib/azure-network
+          - name: azure-endpoints
+            mountPath: /var/run/azure-cns/
+          - name: cns-config
+            mountPath: /etc/azure-cns
+          - name: cni-bin
+            mountPath: /opt/cni/bin
+          - name: azure-vnet
+            mountPath: /var/run/azure-vnet
+          - name: legacy-cni-state
+            mountPath: /var/run/azure-vnet.json
+          - name: xtables-lock
+            mountPath: /run/xtables.lock
+          - name: cni-conflist
+            mountPath: /etc/cni/net.d
      initContainers:
      - name: cni-installer
        image: acnpublic.azurecr.io/cni-dropgz:latest
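This is the heart of the change: a long-running debug sidecar in the CNS pod. The CBL-Mariner core image provides the shell and coreutils the distroless azure-cns image lacks, the sidecar mounts the same hostPath volumes as the CNS container so tests can inspect CNS state, and sleep 3600 keeps it alive (the kubelet restarts it roughly hourly under the daemonset's default Always restart policy). For reference, a sketch of the same container expressed with client-go types, in the style utils_create.go below already uses for volumes — illustrative, not code from this commit (mount list abbreviated):

// Sketch only; mirrors the YAML above.
debugContainer := corev1.Container{
	Name:            "debug",
	Image:           "mcr.microsoft.com/cbl-mariner/base/core:2.0",
	ImagePullPolicy: corev1.PullIfNotPresent,
	Command:         []string{"sleep", "3600"},
	SecurityContext: &corev1.SecurityContext{
		Capabilities: &corev1.Capabilities{Add: []corev1.Capability{"NET_ADMIN"}},
	},
	VolumeMounts: []corev1.VolumeMount{
		{Name: "azure-vnet", MountPath: "/var/run/azure-vnet"},
		{Name: "cns-state", MountPath: "/var/lib/azure-network"},
		// ...remaining mounts mirror the manifest above...
	},
}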
6 changes: 3 additions & 3 deletions test/integration/swiftv2/swiftv2_test.go
@@ -154,7 +154,7 @@ func TestSwiftv2PodToPod(t *testing.T) {
for _, pod := range allPods.Items {
for _, ip := range ipsToPing {
t.Logf("ping from pod %q to %q", pod.Name, ip)
-result := podTest(t, ctx, clientset, pod, []string{"ping", "-c", "3", ip}, restConfig)
+result := podTest(t, ctx, clientset, pod, "", []string{"ping", "-c", "3", ip}, restConfig)
if result != nil {
t.Errorf("ping %q failed: error: %s", ip, result)
}
@@ -163,8 +163,8 @@
return
}

-func podTest(t *testing.T, ctx context.Context, clientset *kuberneteslib.Clientset, srcPod v1.Pod, cmd []string, rc *restclient.Config) error {
-output, err := kubernetes.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, cmd, rc)
+func podTest(t *testing.T, ctx context.Context, clientset *kuberneteslib.Clientset, srcPod v1.Pod, container string, cmd []string, rc *restclient.Config) error {
+output, err := kubernetes.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, container, cmd, rc)
t.Logf(string(output))
if err != nil {
t.Errorf("failed to execute command on pod: %v", srcPod.Name)
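podTest gains a container argument; the empty string preserves the old behavior for the single-container pods this test execs into. A caller that wanted the CNS pod's new sidecar would name it explicitly — hypothetical usage, not a call site in this commit:

// Hypothetical: exec in the debug sidecar rather than the default container.
result := podTest(t, ctx, clientset, pod, "debug", []string{"ping", "-c", "3", ip}, restConfig)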
2 changes: 1 addition & 1 deletion test/internal/datapath/datapath_win.go
@@ -19,7 +19,7 @@ var ipv6PrefixPolicy = []string{"powershell", "-c", "curl.exe", "-6", "-v", "www

func podTest(ctx context.Context, clientset *kubernetes.Clientset, srcPod *apiv1.Pod, cmd []string, rc *restclient.Config, passFunc func(string) error) error {
logrus.Infof("podTest() - %v %v", srcPod.Name, cmd)
-output, err := acnk8s.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, cmd, rc)
+output, err := acnk8s.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, "", cmd, rc)
if err != nil {
return errors.Wrapf(err, "failed to execute command on pod: %v", srcPod.Name)
}
15 changes: 8 additions & 7 deletions test/internal/kubernetes/utils.go
@@ -427,7 +427,7 @@ func writeToFile(dir, fileName, str string) error {
return errors.Wrap(err, "failed to write string")
}

-func ExecCmdOnPod(ctx context.Context, clientset *kubernetes.Clientset, namespace, podName string, cmd []string, config *rest.Config) ([]byte, error) {
+func ExecCmdOnPod(ctx context.Context, clientset *kubernetes.Clientset, namespace, podName, containerName string, cmd []string, config *rest.Config) ([]byte, error) {
var result []byte
execCmdOnPod := func() error {
req := clientset.CoreV1().RESTClient().Post().
@@ -436,11 +436,12 @@ func ExecCmdOnPod(ctx context.Context, clientset *kubernetes.Clientset, namespac
Namespace(namespace).
SubResource("exec").
VersionedParams(&corev1.PodExecOptions{
-Command: cmd,
-Stdin: false,
-Stdout: true,
-Stderr: true,
-TTY: false,
+Command: cmd,
+Container: containerName,
+Stdin: false,
+Stdout: true,
+Stderr: true,
+TTY: false,
}, scheme.ParameterCodec)

exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
@@ -582,7 +583,7 @@ func RestartKubeProxyService(ctx context.Context, clientset *kubernetes.Clientse
}
privilegedPod := pod.Items[0]
// exec into the pod and restart kubeproxy
-_, err = ExecCmdOnPod(ctx, clientset, privilegedNamespace, privilegedPod.Name, restartKubeProxyCmd, config)
+_, err = ExecCmdOnPod(ctx, clientset, privilegedNamespace, privilegedPod.Name, "", restartKubeProxyCmd, config)
if err != nil {
return errors.Wrapf(err, "failed to exec into privileged pod %s on node %s", privilegedPod.Name, node.Name)
}
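The new containerName parameter is threaded into PodExecOptions.Container. Callers that pass "" keep working because the exec subresource resolves an empty container name when the pod has exactly one container; for a multi-container pod such as the CNS pod after this change, a name must be supplied, which is why async-delete-test.sh above execs with -c debug.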
50 changes: 34 additions & 16 deletions test/internal/kubernetes/utils_create.go
@@ -77,87 +77,87 @@ func MustCreateDaemonset(ctx context.Context, daemonsets typedappsv1.DaemonSetIn
MustDeleteDaemonset(ctx, daemonsets, ds)
log.Printf("Creating Daemonset %v", ds.Name)
if _, err := daemonsets.Create(ctx, &ds, metav1.CreateOptions{}); err != nil {
panic(errors.Wrap(err, "failed to create daemonset"))
log.Fatal(errors.Wrap(err, "failed to create daemonset"))
}
}

func MustCreateDeployment(ctx context.Context, deployments typedappsv1.DeploymentInterface, d appsv1.Deployment) {
MustDeleteDeployment(ctx, deployments, d)
log.Printf("Creating Deployment %v", d.Name)
if _, err := deployments.Create(ctx, &d, metav1.CreateOptions{}); err != nil {
panic(errors.Wrap(err, "failed to create deployment"))
log.Fatal(errors.Wrap(err, "failed to create deployment"))
}
}

func mustCreateServiceAccount(ctx context.Context, svcAccounts typedcorev1.ServiceAccountInterface, s corev1.ServiceAccount) {
if err := svcAccounts.Delete(ctx, s.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
panic(errors.Wrap(err, "failed to delete svc account"))
log.Fatal(errors.Wrap(err, "failed to delete svc account"))
}
}
log.Printf("Creating ServiceAccount %v", s.Name)
if _, err := svcAccounts.Create(ctx, &s, metav1.CreateOptions{}); err != nil {
panic(errors.Wrap(err, "failed to create svc account"))
log.Fatal(errors.Wrap(err, "failed to create svc account"))
}
}

func mustCreateClusterRole(ctx context.Context, clusterRoles typedrbacv1.ClusterRoleInterface, cr rbacv1.ClusterRole) {
if err := clusterRoles.Delete(ctx, cr.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
panic(errors.Wrap(err, "failed to delete cluster role"))
log.Fatal(errors.Wrap(err, "failed to delete cluster role"))
}
}
log.Printf("Creating ClusterRoles %v", cr.Name)
if _, err := clusterRoles.Create(ctx, &cr, metav1.CreateOptions{}); err != nil {
panic(errors.Wrap(err, "failed to create cluster role"))
log.Fatal(errors.Wrap(err, "failed to create cluster role"))
}
}

func mustCreateClusterRoleBinding(ctx context.Context, crBindings typedrbacv1.ClusterRoleBindingInterface, crb rbacv1.ClusterRoleBinding) {
if err := crBindings.Delete(ctx, crb.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
panic(errors.Wrap(err, "failed to delete cluster role binding"))
log.Fatal(errors.Wrap(err, "failed to delete cluster role binding"))
}
}
log.Printf("Creating RoleBinding %v", crb.Name)
if _, err := crBindings.Create(ctx, &crb, metav1.CreateOptions{}); err != nil {
panic(errors.Wrap(err, "failed to create role binding"))
log.Fatal(errors.Wrap(err, "failed to create role binding"))
}
}

func mustCreateRole(ctx context.Context, rs typedrbacv1.RoleInterface, r rbacv1.Role) {
if err := rs.Delete(ctx, r.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
panic(errors.Wrap(err, "failed to delete role"))
log.Fatal(errors.Wrap(err, "failed to delete role"))
}
}
log.Printf("Creating Role %v", r.Name)
if _, err := rs.Create(ctx, &r, metav1.CreateOptions{}); err != nil {
panic(errors.Wrap(err, "failed to create role"))
log.Fatal(errors.Wrap(err, "failed to create role"))
}
}

func mustCreateRoleBinding(ctx context.Context, rbi typedrbacv1.RoleBindingInterface, rb rbacv1.RoleBinding) {
if err := rbi.Delete(ctx, rb.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
panic(errors.Wrap(err, "failed to delete role binding"))
log.Fatal(errors.Wrap(err, "failed to delete role binding"))
}
}
log.Printf("Creating RoleBinding %v", rb.Name)
if _, err := rbi.Create(ctx, &rb, metav1.CreateOptions{}); err != nil {
panic(errors.Wrap(err, "failed to create role binding"))
log.Fatal(errors.Wrap(err, "failed to create role binding"))
}
}

func mustCreateConfigMap(ctx context.Context, cmi typedcorev1.ConfigMapInterface, cm corev1.ConfigMap) {
if err := cmi.Delete(ctx, cm.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
panic(errors.Wrap(err, "failed to delete configmap"))
log.Fatal(errors.Wrap(err, "failed to delete configmap"))
}
}
log.Printf("Creating ConfigMap %v", cm.Name)
if _, err := cmi.Create(ctx, &cm, metav1.CreateOptions{}); err != nil {
panic(errors.Wrap(err, "failed to create configmap"))
log.Fatal(errors.Wrap(err, "failed to create configmap"))
}
}

@@ -177,7 +177,7 @@ func MustScaleDeployment(ctx context.Context,
log.Printf("Waiting for pods to be ready..")
err := WaitForPodDeployment(ctx, clientset, namespace, deployment.Name, podLabelSelector, replicas)
if err != nil {
panic(errors.Wrap(err, "failed to wait for pod deployment"))
log.Fatal(errors.Wrap(err, "failed to wait for pod deployment"))
}
}
}
@@ -189,7 +189,7 @@ func MustCreateNamespace(ctx context.Context, clienset *kubernetes.Clientset, na
},
}, metav1.CreateOptions{})
if err != nil {
panic(errors.Wrapf(err, "failed to create namespace %v", namespace))
log.Fatal(errors.Wrapf(err, "failed to create namespace %v", namespace))
}
}
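Every create/delete helper in this file switches from panic to log.Fatal. Both abort the test run, but they fail differently: log.Fatal prints the wrapped error and then calls os.Exit(1), producing a clean one-line failure with no stack trace — and, unlike a panic, it does not unwind the stack, so deferred cleanup never runs. An illustrative sketch of the distinction, not code from the commit:

func demo() {
	defer log.Println("cleanup") // runs during a panic's unwind; skipped entirely by log.Fatal
	log.Fatal("boom")            // prints, then os.Exit(1): no unwinding, no recover
}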

@@ -614,6 +614,15 @@ func hostPathTypePtr(h corev1.HostPathType) *corev1.HostPathType {

func volumesForAzureCNIOverlayLinux() []corev1.Volume {
return []corev1.Volume{
+{
+Name: "azure-endpoints",
+VolumeSource: corev1.VolumeSource{
+HostPath: &corev1.HostPathVolumeSource{
+Path: "/var/run/azure-cns/",
+Type: hostPathTypePtr(corev1.HostPathDirectoryOrCreate),
+},
+},
+},
{
Name: "log",
VolumeSource: corev1.VolumeSource{
@@ -687,6 +696,15 @@ func volumesForAzureCNIOverlayLinux() []corev1.Volume {
},
},
},
+{
+Name: "xtables-lock",
+VolumeSource: corev1.VolumeSource{
+HostPath: &corev1.HostPathVolumeSource{
+Path: "/run/xtables.lock",
+Type: hostPathTypePtr(corev1.HostPathFile),
+},
+},
+},
}
}
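The two new volumes back the debug sidecar's mounts from the manifest above: azure-endpoints uses HostPathDirectoryOrCreate, so /var/run/azure-cns/ is created on the node if it does not exist, while xtables-lock uses HostPathFile, which requires /run/xtables.lock to already be present on the host.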
