diff --git a/e2e/internal/kubeclient/deploy.go b/e2e/internal/kubeclient/deploy.go index 908f1b15dd..cca3df7c1b 100644 --- a/e2e/internal/kubeclient/deploy.go +++ b/e2e/internal/kubeclient/deploy.go @@ -106,7 +106,7 @@ func (s StatefulSet) getPods(ctx context.Context, client *Kubeclient, namespace, // WaitForPod watches the given pod and blocks until it meets the condition Ready=True or the // context expires (is cancelled or times out). func (c *Kubeclient) WaitForPod(ctx context.Context, namespace, name string) error { - watcher, err := c.client.CoreV1().Pods(namespace).Watch(ctx, metav1.ListOptions{FieldSelector: "metadata.name=" + name}) + watcher, err := c.Client.CoreV1().Pods(namespace).Watch(ctx, metav1.ListOptions{FieldSelector: "metadata.name=" + name}) if err != nil { return err } @@ -148,7 +148,7 @@ func (c *Kubeclient) WaitFor(ctx context.Context, resource ResourceWaiter, names retryLoop: for { - watcher, err := resource.watcher(ctx, c.client, namespace, name) + watcher, err := resource.watcher(ctx, c.Client, namespace, name) if err != nil { return err } @@ -216,7 +216,7 @@ retryLoop: // WaitForLoadBalancer waits until the given service is configured with an external IP and returns it. 
func (c *Kubeclient) WaitForLoadBalancer(ctx context.Context, namespace, name string) (string, error) { - watcher, err := c.client.CoreV1().Services(namespace).Watch(ctx, metav1.ListOptions{FieldSelector: "metadata.name=" + name}) + watcher, err := c.Client.CoreV1().Services(namespace).Watch(ctx, metav1.ListOptions{FieldSelector: "metadata.name=" + name}) if err != nil { return "", err } @@ -291,7 +291,7 @@ func isPodReady(pod *corev1.Pod) bool { } func (c *Kubeclient) resourceInterfaceFor(obj *unstructured.Unstructured) (dynamic.ResourceInterface, error) { - dyn := dynamic.New(c.client.RESTClient()) + dyn := dynamic.New(c.Client.RESTClient()) gvk := obj.GroupVersionKind() mapping, err := c.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) @@ -353,7 +353,7 @@ func (c *Kubeclient) Restart(ctx context.Context, resource ResourceWaiter, names return err } for _, pod := range pods { - err := c.client.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{ + err := c.Client.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{ GracePeriodSeconds: toPtr(int64(0)), }) if err != nil { diff --git a/e2e/internal/kubeclient/kubeclient.go b/e2e/internal/kubeclient/kubeclient.go index 74c23e6d4d..93306b0faf 100644 --- a/e2e/internal/kubeclient/kubeclient.go +++ b/e2e/internal/kubeclient/kubeclient.go @@ -31,8 +31,8 @@ import ( type Kubeclient struct { log *slog.Logger - // client is the underlying Kubernetes client. - client *kubernetes.Clientset + // Client is the underlying Kubernetes client. 
+ Client *kubernetes.Clientset // restMapper allows to look up schema information for dynamic resources restMapper meta.RESTMapper // config is the "Kubeconfig" for the client @@ -53,7 +53,7 @@ func New(config *rest.Config, log *slog.Logger) (*Kubeclient, error) { return &Kubeclient{ log: log, - client: client, + Client: client, config: config, restMapper: restmapper.NewDiscoveryRESTMapper(resources), }, nil @@ -89,11 +89,11 @@ func NewForTest(t *testing.T) *Kubeclient { // A pod is considered to belong to a deployment if it is owned by a ReplicaSet which is in turn // owned by the Deployment in question. func (c *Kubeclient) PodsFromDeployment(ctx context.Context, namespace, deployment string) ([]v1.Pod, error) { - replicasets, err := c.client.AppsV1().ReplicaSets(namespace).List(ctx, metav1.ListOptions{}) + replicasets, err := c.Client.AppsV1().ReplicaSets(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("listing replicasets: %w", err) } - pods, err := c.client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) + pods, err := c.Client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("listing pods: %w", err) } @@ -119,7 +119,7 @@ func (c *Kubeclient) PodsFromDeployment(ctx context.Context, namespace, deployme // PodsFromOwner returns the pods owned by an object in the namespace of the given kind. func (c *Kubeclient) PodsFromOwner(ctx context.Context, namespace, kind, name string) ([]v1.Pod, error) { - pods, err := c.client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) + pods, err := c.Client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("listing pods: %w", err) } @@ -142,7 +142,7 @@ func (c *Kubeclient) Exec(ctx context.Context, namespace, pod string, argv []str ) { buf := &bytes.Buffer{} errBuf := &bytes.Buffer{} - request := c.client.CoreV1().RESTClient(). + request := c.Client.CoreV1().RESTClient(). Post(). 
Namespace(namespace). Resource("pods"). @@ -187,7 +187,7 @@ func (c *Kubeclient) ExecDeployment(ctx context.Context, namespace, deployment s // LogDebugInfo collects pod information from the cluster and writes it to the logger. func (c *Kubeclient) LogDebugInfo(ctx context.Context) { - namespaces, err := c.client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) + namespaces, err := c.Client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) if err != nil { c.log.Error("Could not get namespaces", "error", err) return @@ -195,7 +195,7 @@ func (c *Kubeclient) LogDebugInfo(ctx context.Context) { for _, namespace := range namespaces.Items { c.log.Debug("Collecting debug info for pods", "namespace", namespace.Name) - pods, err := c.client.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{}) + pods, err := c.Client.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { c.log.Error("Could not get pods", "namespace", namespace.Name, "error", err) continue diff --git a/e2e/internal/kubeclient/portforward.go b/e2e/internal/kubeclient/portforward.go index d215f67a0c..81c464ec2d 100644 --- a/e2e/internal/kubeclient/portforward.go +++ b/e2e/internal/kubeclient/portforward.go @@ -65,7 +65,7 @@ func (k *Kubeclient) portForwardPod(ctx context.Context, namespace, podName, rem errorCh := make(chan error) // Ports are forwarded by upgrading this POST request to a SPDY connection. - req := k.client.CoreV1().RESTClient().Post(). + req := k.Client.CoreV1().RESTClient().Post(). Resource("pods"). Namespace(namespace). Name(podName). 
diff --git a/e2e/regression/regression_test.go b/e2e/regression/regression_test.go index 557b9574f6..0d63fef8c5 100644 --- a/e2e/regression/regression_test.go +++ b/e2e/regression/regression_test.go @@ -10,6 +10,7 @@ import ( "context" "flag" "os" + "path" "strings" "testing" "time" @@ -20,6 +21,7 @@ import ( "github.com/edgelesssys/contrast/internal/manifest" "github.com/edgelesssys/contrast/internal/platforms" "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var ( @@ -38,38 +40,53 @@ func TestRegression(t *testing.T) { runtimeHandler, err := manifest.RuntimeHandler(platform) require.NoError(t, err) + ct := contrasttest.New(t, imageReplacementsFile, namespaceFile, platform, false) + + resources := kuberesource.CoordinatorBundle() + resources = kuberesource.PatchRuntimeHandlers(resources, runtimeHandler) + resources = kuberesource.AddPortForwarders(resources) + + ct.Init(t, resources) + + require.True(t, t.Run("generate", ct.Generate), "contrast generate needs to succeed for subsequent tests") + require.True(t, t.Run("apply", ct.Apply), "Kubernetes resources need to be applied for subsequent tests") + require.True(t, t.Run("set", ct.Set), "contrast set needs to succeed for subsequent tests") + require.True(t, t.Run("verify", ct.Verify), "contrast verify needs to succeed for subsequent tests") + for _, file := range files { t.Run(file.Name(), func(t *testing.T) { require := require.New(t) c := kubeclient.NewForTest(t) - ct := contrasttest.New(t, imageReplacementsFile, namespaceFile, platform, false) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) defer cancel() - resources := kuberesource.CoordinatorBundle() - yaml, err := os.ReadFile(yamlDir + file.Name()) require.NoError(err) yaml = bytes.ReplaceAll(yaml, []byte("@@REPLACE_NAMESPACE@@"), []byte(ct.Namespace)) - yamlResources, err := kuberesource.UnmarshalApplyConfigurations(yaml) + newResources, err := kuberesource.UnmarshalApplyConfigurations(yaml) 
require.NoError(err) - resources = append(resources, yamlResources...) - - resources = kuberesource.PatchRuntimeHandlers(resources, runtimeHandler) + // newResources := append(resources, yamlResources...) - resources = kuberesource.AddPortForwarders(resources) + newResources = kuberesource.PatchRuntimeHandlers(newResources, runtimeHandler) + newResources = kuberesource.AddPortForwarders(newResources) - ct.Init(t, resources) + // write the new resources.yaml + resourceBytes, err := kuberesource.EncodeResources(newResources...) + require.NoError(err) + require.NoError(os.WriteFile(path.Join(ct.WorkDir, "resources.yaml"), resourceBytes, 0o644)) + // generate and set require.True(t.Run("generate", ct.Generate), "contrast generate needs to succeed for subsequent tests") require.True(t.Run("apply", ct.Apply), "Kubernetes resources need to be applied for subsequent tests") require.True(t.Run("set", ct.Set), "contrast set needs to succeed for subsequent tests") - require.True(t.Run("verify", ct.Verify), "contrast verify needs to succeed for subsequent tests") deploymentName, _ := strings.CutSuffix(file.Name(), ".yaml") require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, deploymentName)) + + // delete the deployment + require.NoError(ct.Kubeclient.Client.AppsV1().Deployments(ct.Namespace).Delete(ctx, deploymentName, metav1.DeleteOptions{})) }) } }