From dbd4a087dff60e48d23a3e3765c07e144c23a3ed Mon Sep 17 00:00:00 2001
From: Kevin Hannon
Date: Tue, 26 Mar 2024 10:22:44 -0400
Subject: [PATCH] Address PR comments: move the JobSet e2e test into its own file

---
 Makefile                              |   1 -
 hack/e2e-test.sh                      |   5 --
 test/e2e/singlecluster/e2e_test.go    |  82 ------------------
 test/e2e/singlecluster/jobset_test.go | 115 ++++++++++++++++++++++++++
 4 files changed, 115 insertions(+), 88 deletions(-)
 create mode 100644 test/e2e/singlecluster/jobset_test.go

diff --git a/Makefile b/Makefile
index d09733fcdc..9f65030504 100644
--- a/Makefile
+++ b/Makefile
@@ -191,7 +191,6 @@ MULTIKUEUE-E2E_TARGETS := $(addprefix run-test-multikueue-e2e-,${E2E_K8S_VERSIONS})
 
 .PHONY: test-e2e-all
 test-e2e-all: ginkgo $(E2E_TARGETS) $(MULTIKUEUE-E2E_TARGETS)
-
 FORCE:
 
 run-test-e2e-%: K8S_VERSION = $(@:run-test-e2e-%=%)
diff --git a/hack/e2e-test.sh b/hack/e2e-test.sh
index ca4489555e..b0ea34ec0f 100755
--- a/hack/e2e-test.sh
+++ b/hack/e2e-test.sh
@@ -22,10 +22,6 @@ SOURCE_DIR="$(cd "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P)"
 ROOT_DIR="$SOURCE_DIR/.."
 export E2E_TEST_IMAGE=gcr.io/k8s-staging-perf-tests/sleep:v0.1.0
 
-export JOBSET_MANIFEST=https://github.com/kubernetes-sigs/jobset/releases/download/${JOBSET_VERSION}/manifests.yaml
-export JOBSET_IMAGE=registry.k8s.io/jobset/jobset:${JOBSET_VERSION}
-export JOBSET_CRDS=${ROOT_DIR}/dep-crds/jobset-operator/
-
 source ${SOURCE_DIR}/e2e-common.sh
 
 function cleanup {
@@ -57,7 +53,6 @@ function kind_load {
     cluster_kind_load $KIND_CLUSTER_NAME
   fi
   docker pull registry.k8s.io/jobset/jobset:$JOBSET_VERSION
-  kubectl apply --server-side -f ${JOBSET_CRDS}/*
   install_jobset $KIND_CLUSTER_NAME
 }
 
diff --git a/test/e2e/singlecluster/e2e_test.go b/test/e2e/singlecluster/e2e_test.go
index d8187ee361..b6cae0ee8a 100644
--- a/test/e2e/singlecluster/e2e_test.go
+++ b/test/e2e/singlecluster/e2e_test.go
@@ -27,16 +27,13 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	jobset "sigs.k8s.io/jobset/api/jobset/v1alpha2"
 
 	kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
 	"sigs.k8s.io/kueue/pkg/controller/constants"
 	workloadjob "sigs.k8s.io/kueue/pkg/controller/jobs/job"
-	workloadjobset "sigs.k8s.io/kueue/pkg/controller/jobs/jobset"
 	"sigs.k8s.io/kueue/pkg/util/slices"
 	"sigs.k8s.io/kueue/pkg/util/testing"
 	testingjob "sigs.k8s.io/kueue/pkg/util/testingjobs/job"
-	testingjobset "sigs.k8s.io/kueue/pkg/util/testingjobs/jobset"
 	"sigs.k8s.io/kueue/pkg/workload"
 	"sigs.k8s.io/kueue/test/util"
 )
@@ -493,85 +490,6 @@ var _ = ginkgo.Describe("Kueue", func() {
 			})
 		})
 	})
-	ginkgo.When("Creating a JobSet", func() {
-		var (
-			defaultRf    *kueue.ResourceFlavor
-			localQueue   *kueue.LocalQueue
-			clusterQueue *kueue.ClusterQueue
-		)
-		ginkgo.BeforeEach(func() {
-			defaultRf = testing.MakeResourceFlavor("default").Obj()
-			gomega.Expect(k8sClient.Create(ctx, defaultRf)).Should(gomega.Succeed())
-			clusterQueue = testing.MakeClusterQueue("cluster-queue").
-				ResourceGroup(
-					*testing.MakeFlavorQuotas(defaultRf.Name).
-						Resource(corev1.ResourceCPU, "2").
- Resource(corev1.ResourceMemory, "2G").Obj()).Obj() - gomega.Expect(k8sClient.Create(ctx, clusterQueue)).Should(gomega.Succeed()) - localQueue = testing.MakeLocalQueue("main", ns.Name).ClusterQueue("cluster-queue").Obj() - gomega.Expect(k8sClient.Create(ctx, localQueue)).Should(gomega.Succeed()) - }) - ginkgo.AfterEach(func() { - gomega.Expect(util.DeleteLocalQueue(ctx, k8sClient, localQueue)).Should(gomega.Succeed()) - gomega.Expect(util.DeleteAllJobsetsInNamespace(ctx, k8sClient, ns)).Should(gomega.Succeed()) - util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, clusterQueue, true) - util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, defaultRf, true) - }) - - ginkgo.It("Should run a jobSet if admitted", func() { - jobSet := testingjobset.MakeJobSet("job-set", ns.Name). - Queue("main"). - ReplicatedJobs( - testingjobset.ReplicatedJobRequirements{ - Name: "replicated-job-1", - Replicas: 2, - Parallelism: 2, - Completions: 2, - Image: "gcr.io/k8s-staging-perf-tests/sleep:v0.1.0", - // Give it the time to be observed Active in the live status update step. - Args: []string{"5s"}, - }, - ). - Request("replicated-job-1", "cpu", "500m"). - Request("replicated-job-1", "memory", "200M"). - Obj() - - ginkgo.By("Creating the jobSet", func() { - gomega.Expect(k8sClient.Create(ctx, jobSet)).Should(gomega.Succeed()) - }) - - createdLeaderWorkload := &kueue.Workload{} - wlLookupKey := types.NamespacedName{Name: workloadjobset.GetWorkloadNameForJobSet(jobSet.Name, jobSet.UID), Namespace: ns.Name} - - ginkgo.By("Waiting for the jobSet to get status updates", func() { - gomega.Eventually(func(g gomega.Gomega) { - createdJobset := &jobset.JobSet{} - g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(jobSet), createdJobset)).To(gomega.Succeed()) - - g.Expect(createdJobset.Status.ReplicatedJobsStatus).To(gomega.BeComparableTo([]jobset.ReplicatedJobStatus{ - { - Name: "replicated-job-1", - Ready: 2, - Active: 2, - }, - }, cmpopts.IgnoreFields(jobset.ReplicatedJobStatus{}, "Succeeded", "Failed"))) - }, util.LongTimeout, util.Interval).Should(gomega.Succeed()) - }) - - ginkgo.By("Waiting for the jobSet to finish", func() { - gomega.Eventually(func(g gomega.Gomega) { - g.Expect(k8sClient.Get(ctx, wlLookupKey, createdLeaderWorkload)).To(gomega.Succeed()) - - g.Expect(apimeta.FindStatusCondition(createdLeaderWorkload.Status.Conditions, kueue.WorkloadFinished)).To(gomega.BeComparableTo(&metav1.Condition{ - Type: kueue.WorkloadFinished, - Status: metav1.ConditionTrue, - Reason: "JobSetFinished", - Message: "JobSet finished successfully", - }, cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime"))) - }, util.LongTimeout, util.Interval).Should(gomega.Succeed()) - }) - }) - }) }) func expectJobUnsuspended(key types.NamespacedName) { diff --git a/test/e2e/singlecluster/jobset_test.go b/test/e2e/singlecluster/jobset_test.go new file mode 100644 index 0000000000..bf1f5b747a --- /dev/null +++ b/test/e2e/singlecluster/jobset_test.go @@ -0,0 +1,115 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"github.com/google/go-cmp/cmp/cmpopts"
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+
+	kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
+	workloadjobset "sigs.k8s.io/kueue/pkg/controller/jobs/jobset"
+	"sigs.k8s.io/kueue/pkg/util/testing"
+	testingjobset "sigs.k8s.io/kueue/pkg/util/testingjobs/jobset"
+	"sigs.k8s.io/kueue/test/util"
+)
+
+// +kubebuilder:docs-gen:collapse=Imports
+
+var _ = ginkgo.Describe("Kueue", func() {
+	var ns *corev1.Namespace
+
+	ginkgo.BeforeEach(func() {
+		ns = &corev1.Namespace{
+			ObjectMeta: metav1.ObjectMeta{
+				GenerateName: "e2e-",
+			},
+		}
+		gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
+	})
+	ginkgo.AfterEach(func() {
+		gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
+	})
+	ginkgo.When("Creating a JobSet", func() {
+		var (
+			defaultRf    *kueue.ResourceFlavor
+			localQueue   *kueue.LocalQueue
+			clusterQueue *kueue.ClusterQueue
+		)
+		ginkgo.BeforeEach(func() {
+			defaultRf = testing.MakeResourceFlavor("default").Obj()
+			gomega.Expect(k8sClient.Create(ctx, defaultRf)).Should(gomega.Succeed())
+			clusterQueue = testing.MakeClusterQueue("cluster-queue").
+				ResourceGroup(
+					*testing.MakeFlavorQuotas(defaultRf.Name).
+						Resource(corev1.ResourceCPU, "2").
+						Resource(corev1.ResourceMemory, "2G").Obj()).Obj()
+			gomega.Expect(k8sClient.Create(ctx, clusterQueue)).Should(gomega.Succeed())
+			localQueue = testing.MakeLocalQueue("main", ns.Name).ClusterQueue("cluster-queue").Obj()
+			gomega.Expect(k8sClient.Create(ctx, localQueue)).Should(gomega.Succeed())
+		})
+		ginkgo.AfterEach(func() {
+			gomega.Expect(util.DeleteLocalQueue(ctx, k8sClient, localQueue)).Should(gomega.Succeed())
+			gomega.Expect(util.DeleteAllJobsetsInNamespace(ctx, k8sClient, ns)).Should(gomega.Succeed())
+			util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, clusterQueue, true)
+			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, defaultRf, true)
+		})
+
+		ginkgo.It("Should run a jobSet if admitted", func() {
+			jobSet := testingjobset.MakeJobSet("job-set", ns.Name).
+				Queue("main").
+				ReplicatedJobs(
+					testingjobset.ReplicatedJobRequirements{
+						Name:        "replicated-job-1",
+						Replicas:    2,
+						Parallelism: 2,
+						Completions: 2,
+						Image:       "gcr.io/k8s-staging-perf-tests/sleep:v0.1.0",
+						// Use a short sleep; this test only waits for the JobSet to finish.
+						Args: []string{"1ms"},
+					},
+				).
+				Request("replicated-job-1", "cpu", "500m").
+				Request("replicated-job-1", "memory", "200M").
+				Obj()
+
+			ginkgo.By("Creating the jobSet", func() {
+				gomega.Expect(k8sClient.Create(ctx, jobSet)).Should(gomega.Succeed())
+			})
+
+			createdLeaderWorkload := &kueue.Workload{}
+			wlLookupKey := types.NamespacedName{Name: workloadjobset.GetWorkloadNameForJobSet(jobSet.Name, jobSet.UID), Namespace: ns.Name}
+
+			ginkgo.By("Waiting for the jobSet to finish", func() {
+				gomega.Eventually(func(g gomega.Gomega) {
+					g.Expect(k8sClient.Get(ctx, wlLookupKey, createdLeaderWorkload)).To(gomega.Succeed())
+
+					g.Expect(apimeta.FindStatusCondition(createdLeaderWorkload.Status.Conditions, kueue.WorkloadFinished)).To(gomega.BeComparableTo(&metav1.Condition{
+						Type:    kueue.WorkloadFinished,
+						Status:  metav1.ConditionTrue,
+						Reason:  "JobSetFinished",
+						Message: "JobSet finished successfully",
+					}, cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime")))
+				}, util.LongTimeout, util.Interval).Should(gomega.Succeed())
+			})
+		})
+	})
+})
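
To exercise the relocated spec locally, the repo's existing e2e entry points
apply. A minimal sketch (the Kubernetes version suffix and the focus string
are illustrative assumptions, not values pinned by this patch):

  # Run the single-cluster e2e suite, which now picks up jobset_test.go; the
  # run-test-e2e-% Makefile target derives K8S_VERSION from its suffix.
  make run-test-e2e-1.29.2

  # Or, against a cluster that already has Kueue and JobSet installed,
  # focus just this spec with the ginkgo v2 CLI:
  ginkgo run --focus "Creating a JobSet" ./test/e2e/singlecluster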