Commit: pr comments
kannon92 committed Mar 26, 2024
1 parent 4b479a1 commit dbd4a08
Showing 4 changed files with 115 additions and 88 deletions.
1 change: 0 additions & 1 deletion Makefile
@@ -191,7 +191,6 @@ MULTIKUEUE-E2E_TARGETS := $(addprefix run-test-multikueue-e2e-,${E2E_K8S_VERSION
.PHONY: test-e2e-all
test-e2e-all: ginkgo $(E2E_TARGETS) $(MULTIKUEUE-E2E_TARGETS)


FORCE:

run-test-e2e-%: K8S_VERSION = $(@:run-test-e2e-%=%)
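For reference, the pattern rule kept above suggests the e2e suites are invoked per Kubernetes version; the version value below is only an illustration and is not part of this change:

    # run the single-cluster e2e suite against one Kubernetes version (version value is illustrative)
    make run-test-e2e-1.29.2

    # or run every configured version, including the MultiKueue suites
    make test-e2e-all
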
5 changes: 0 additions & 5 deletions hack/e2e-test.sh
@@ -22,10 +22,6 @@ SOURCE_DIR="$(cd "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P)"
ROOT_DIR="$SOURCE_DIR/.."
export E2E_TEST_IMAGE=gcr.io/k8s-staging-perf-tests/sleep:v0.1.0

export JOBSET_MANIFEST=https://github.com/kubernetes-sigs/jobset/releases/download/${JOBSET_VERSION}/manifests.yaml
export JOBSET_IMAGE=registry.k8s.io/jobset/jobset:${JOBSET_VERSION}
export JOBSET_CRDS=${ROOT_DIR}/dep-crds/jobset-operator/

source ${SOURCE_DIR}/e2e-common.sh

function cleanup {
@@ -57,7 +53,6 @@ function kind_load {
cluster_kind_load $KIND_CLUSTER_NAME
fi
docker pull registry.k8s.io/jobset/jobset:$JOBSET_VERSION
kubectl apply --server-side -f ${JOBSET_CRDS}/*
install_jobset $KIND_CLUSTER_NAME
}

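The JobSet manifest, image, and CRD exports, along with the explicit kubectl apply of the CRDs, are dropped from this script; JobSet setup now goes through the helpers sourced from e2e-common.sh. That file is not shown in this diff, so the following is only a rough sketch, under that assumption, of what an install_jobset helper could look like; the names and steps are guesses, not the actual implementation:

    # hypothetical sketch only; the real helper lives in hack/e2e-common.sh and may differ
    function install_jobset {
        local cluster_name=$1
        # make the JobSet controller image available inside the kind cluster
        kind load docker-image registry.k8s.io/jobset/jobset:${JOBSET_VERSION} --name "${cluster_name}"
        # install the JobSet CRDs and controller from the upstream release manifests
        kubectl apply --server-side -f "https://github.com/kubernetes-sigs/jobset/releases/download/${JOBSET_VERSION}/manifests.yaml"
    }
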
82 changes: 0 additions & 82 deletions test/e2e/singlecluster/e2e_test.go
@@ -27,16 +27,13 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
jobset "sigs.k8s.io/jobset/api/jobset/v1alpha2"

kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
"sigs.k8s.io/kueue/pkg/controller/constants"
workloadjob "sigs.k8s.io/kueue/pkg/controller/jobs/job"
workloadjobset "sigs.k8s.io/kueue/pkg/controller/jobs/jobset"
"sigs.k8s.io/kueue/pkg/util/slices"
"sigs.k8s.io/kueue/pkg/util/testing"
testingjob "sigs.k8s.io/kueue/pkg/util/testingjobs/job"
testingjobset "sigs.k8s.io/kueue/pkg/util/testingjobs/jobset"
"sigs.k8s.io/kueue/pkg/workload"
"sigs.k8s.io/kueue/test/util"
)
@@ -493,85 +490,6 @@ var _ = ginkgo.Describe("Kueue", func() {
})
})
})
ginkgo.When("Creating a JobSet", func() {
var (
defaultRf *kueue.ResourceFlavor
localQueue *kueue.LocalQueue
clusterQueue *kueue.ClusterQueue
)
ginkgo.BeforeEach(func() {
defaultRf = testing.MakeResourceFlavor("default").Obj()
gomega.Expect(k8sClient.Create(ctx, defaultRf)).Should(gomega.Succeed())
clusterQueue = testing.MakeClusterQueue("cluster-queue").
ResourceGroup(
*testing.MakeFlavorQuotas(defaultRf.Name).
Resource(corev1.ResourceCPU, "2").
Resource(corev1.ResourceMemory, "2G").Obj()).Obj()
gomega.Expect(k8sClient.Create(ctx, clusterQueue)).Should(gomega.Succeed())
localQueue = testing.MakeLocalQueue("main", ns.Name).ClusterQueue("cluster-queue").Obj()
gomega.Expect(k8sClient.Create(ctx, localQueue)).Should(gomega.Succeed())
})
ginkgo.AfterEach(func() {
gomega.Expect(util.DeleteLocalQueue(ctx, k8sClient, localQueue)).Should(gomega.Succeed())
gomega.Expect(util.DeleteAllJobsetsInNamespace(ctx, k8sClient, ns)).Should(gomega.Succeed())
util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, clusterQueue, true)
util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, defaultRf, true)
})

ginkgo.It("Should run a jobSet if admitted", func() {
jobSet := testingjobset.MakeJobSet("job-set", ns.Name).
Queue("main").
ReplicatedJobs(
testingjobset.ReplicatedJobRequirements{
Name: "replicated-job-1",
Replicas: 2,
Parallelism: 2,
Completions: 2,
Image: "gcr.io/k8s-staging-perf-tests/sleep:v0.1.0",
// Give it the time to be observed Active in the live status update step.
Args: []string{"5s"},
},
).
Request("replicated-job-1", "cpu", "500m").
Request("replicated-job-1", "memory", "200M").
Obj()

ginkgo.By("Creating the jobSet", func() {
gomega.Expect(k8sClient.Create(ctx, jobSet)).Should(gomega.Succeed())
})

createdLeaderWorkload := &kueue.Workload{}
wlLookupKey := types.NamespacedName{Name: workloadjobset.GetWorkloadNameForJobSet(jobSet.Name, jobSet.UID), Namespace: ns.Name}

ginkgo.By("Waiting for the jobSet to get status updates", func() {
gomega.Eventually(func(g gomega.Gomega) {
createdJobset := &jobset.JobSet{}
g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(jobSet), createdJobset)).To(gomega.Succeed())

g.Expect(createdJobset.Status.ReplicatedJobsStatus).To(gomega.BeComparableTo([]jobset.ReplicatedJobStatus{
{
Name: "replicated-job-1",
Ready: 2,
Active: 2,
},
}, cmpopts.IgnoreFields(jobset.ReplicatedJobStatus{}, "Succeeded", "Failed")))
}, util.LongTimeout, util.Interval).Should(gomega.Succeed())
})

ginkgo.By("Waiting for the jobSet to finish", func() {
gomega.Eventually(func(g gomega.Gomega) {
g.Expect(k8sClient.Get(ctx, wlLookupKey, createdLeaderWorkload)).To(gomega.Succeed())

g.Expect(apimeta.FindStatusCondition(createdLeaderWorkload.Status.Conditions, kueue.WorkloadFinished)).To(gomega.BeComparableTo(&metav1.Condition{
Type: kueue.WorkloadFinished,
Status: metav1.ConditionTrue,
Reason: "JobSetFinished",
Message: "JobSet finished successfully",
}, cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime")))
}, util.LongTimeout, util.Interval).Should(gomega.Succeed())
})
})
})
})

func expectJobUnsuspended(key types.NamespacedName) {
115 changes: 115 additions & 0 deletions test/e2e/singlecluster/jobset_test.go
@@ -0,0 +1,115 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"

kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
workloadjobset "sigs.k8s.io/kueue/pkg/controller/jobs/jobset"
"sigs.k8s.io/kueue/pkg/util/testing"
testingjobset "sigs.k8s.io/kueue/pkg/util/testingjobs/jobset"
"sigs.k8s.io/kueue/test/util"
)

// +kubebuilder:docs-gen:collapse=Imports

var _ = ginkgo.Describe("Kueue", func() {
var ns *corev1.Namespace

ginkgo.BeforeEach(func() {
ns = &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "e2e-",
},
}
gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
})
ginkgo.AfterEach(func() {
gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
})
ginkgo.When("Creating a JobSet", func() {
var (
defaultRf *kueue.ResourceFlavor
localQueue *kueue.LocalQueue
clusterQueue *kueue.ClusterQueue
)
ginkgo.BeforeEach(func() {
defaultRf = testing.MakeResourceFlavor("default").Obj()
gomega.Expect(k8sClient.Create(ctx, defaultRf)).Should(gomega.Succeed())
clusterQueue = testing.MakeClusterQueue("cluster-queue").
ResourceGroup(
*testing.MakeFlavorQuotas(defaultRf.Name).
Resource(corev1.ResourceCPU, "2").
Resource(corev1.ResourceMemory, "2G").Obj()).Obj()
gomega.Expect(k8sClient.Create(ctx, clusterQueue)).Should(gomega.Succeed())
localQueue = testing.MakeLocalQueue("main", ns.Name).ClusterQueue("cluster-queue").Obj()
gomega.Expect(k8sClient.Create(ctx, localQueue)).Should(gomega.Succeed())
})
ginkgo.AfterEach(func() {
gomega.Expect(util.DeleteLocalQueue(ctx, k8sClient, localQueue)).Should(gomega.Succeed())
gomega.Expect(util.DeleteAllJobsetsInNamespace(ctx, k8sClient, ns)).Should(gomega.Succeed())
util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, clusterQueue, true)
util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, defaultRf, true)
})

ginkgo.It("Should run a jobSet if admitted", func() {
jobSet := testingjobset.MakeJobSet("job-set", ns.Name).
Queue("main").
ReplicatedJobs(
testingjobset.ReplicatedJobRequirements{
Name: "replicated-job-1",
Replicas: 2,
Parallelism: 2,
Completions: 2,
Image: "gcr.io/k8s-staging-perf-tests/sleep:v0.1.0",
// Give it the time to be observed Active in the live status update step.
Args: []string{"1ms"},
},
).
Request("replicated-job-1", "cpu", "500m").
Request("replicated-job-1", "memory", "200M").
Obj()

ginkgo.By("Creating the jobSet", func() {
gomega.Expect(k8sClient.Create(ctx, jobSet)).Should(gomega.Succeed())
})

createdLeaderWorkload := &kueue.Workload{}
wlLookupKey := types.NamespacedName{Name: workloadjobset.GetWorkloadNameForJobSet(jobSet.Name, jobSet.UID), Namespace: ns.Name}

ginkgo.By("Waiting for the jobSet to finish", func() {
gomega.Eventually(func(g gomega.Gomega) {
g.Expect(k8sClient.Get(ctx, wlLookupKey, createdLeaderWorkload)).To(gomega.Succeed())

g.Expect(apimeta.FindStatusCondition(createdLeaderWorkload.Status.Conditions, kueue.WorkloadFinished)).To(gomega.BeComparableTo(&metav1.Condition{
Type: kueue.WorkloadFinished,
Status: metav1.ConditionTrue,
Reason: "JobSetFinished",
Message: "JobSet finished successfully",
}, cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime")))
}, util.LongTimeout, util.Interval).Should(gomega.Succeed())
})
})
})
})
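
To run only the new spec locally, one can presumably point Ginkgo at the singlecluster suite with a focus filter; this assumes a cluster that already has Kueue and JobSet installed, as the e2e harness sets up:

    # focus on the new JobSet spec; the exact invocation may vary with the local setup
    ginkgo -v --focus "Creating a JobSet" ./test/e2e/singlecluster/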
