From 33155da284daa48940e8bbbee06e372007e862a0 Mon Sep 17 00:00:00 2001
From: Yuki Iwai
Date: Fri, 8 Mar 2024 04:34:05 +0900
Subject: [PATCH] Job: Add a unit test case so that we can prove the Complete condition is added even if some running pods still remain

Signed-off-by: Yuki Iwai
---
 pkg/controller/job/job_controller_test.go | 66 ++++++++++++++++++++++-
 1 file changed, 65 insertions(+), 1 deletion(-)

diff --git a/pkg/controller/job/job_controller_test.go b/pkg/controller/job/job_controller_test.go
index 4a4ac7bb7cf12..84311ebd077e5 100644
--- a/pkg/controller/job/job_controller_test.go
+++ b/pkg/controller/job/job_controller_test.go
@@ -1422,7 +1422,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 			wantSucceededPodsMetric: 3,
 			wantFailedPodsMetric:    3,
 		},
-		"succeeding job": {
+		"succeeding job by JobSuccessPolicy": {
 			pods: []*v1.Pod{
 				buildPod().uid("a").phase(v1.PodSucceeded).trackingFinalizer().Pod,
 				buildPod().uid("b").phase(v1.PodFailed).trackingFinalizer().Pod,
 			},
@@ -3834,6 +3834,70 @@ func TestSyncJobWithJobSuccessPolicy(t *testing.T) {
 				},
 			},
 		},
+		// In the current mechanism, the job controller adds Complete condition to Job
+		// even if some running pods still remain.
+		// So, we need to revisit here before we graduate the JobSuccessPolicy to beta.
+		// TODO(#123775): A Job might finish with ready!=0
+		// REF: https://github.com/kubernetes/kubernetes/issues/123775
+		"job with successPolicy; job has SuccessCriteriaMet and Complete condition when job meets to successPolicy and some pods still are running": {
+			enableJobSuccessPolicy: true,
+			job: batch.Job{
+				TypeMeta:   validTypeMeta,
+				ObjectMeta: validObjectMeta,
+				Spec: batch.JobSpec{
+					Selector:             validSelector,
+					Template:             validTemplate,
+					CompletionMode:       completionModePtr(batch.IndexedCompletion),
+					Parallelism:          ptr.To[int32](3),
+					Completions:          ptr.To[int32](3),
+					BackoffLimit:         ptr.To[int32](math.MaxInt32),
+					BackoffLimitPerIndex: ptr.To[int32](3),
+					SuccessPolicy: &batch.SuccessPolicy{
+						Rules: []batch.SuccessPolicyRule{{
+							SucceededIndexes: ptr.To("0,1"),
+							SucceededCount:   ptr.To[int32](1),
+						}},
+					},
+				},
+				Status: batch.JobStatus{
+					Conditions: []batch.JobCondition{
+						{
+							Type:    batch.JobSuccessCriteriaMet,
+							Status:  v1.ConditionTrue,
+							Reason:  batch.JobReasonSuccessPolicy,
+							Message: "Matched rules at index 0",
+						},
+					},
+				},
+			},
+			pods: []v1.Pod{
+				*buildPod().uid("a1").index("0").phase(v1.PodFailed).trackingFinalizer().Pod,
+				*buildPod().uid("a2").index("1").phase(v1.PodRunning).trackingFinalizer().Pod,
+				*buildPod().uid("b").index("1").phase(v1.PodSucceeded).trackingFinalizer().Pod,
+				*buildPod().uid("c").index("2").phase(v1.PodRunning).trackingFinalizer().Pod,
+			},
+			wantStatus: batch.JobStatus{
+				Failed:                  1,
+				Succeeded:               1,
+				Terminating:             ptr.To[int32](0),
+				CompletedIndexes:        "1",
+				UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
+				Conditions: []batch.JobCondition{
+					{
+						Type:    batch.JobSuccessCriteriaMet,
+						Status:  v1.ConditionTrue,
+						Reason:  batch.JobReasonSuccessPolicy,
+						Message: "Matched rules at index 0",
+					},
+					{
+						Type:    batch.JobComplete,
+						Status:  v1.ConditionTrue,
+						Reason:  batch.JobReasonSuccessPolicy,
+						Message: "Matched rules at index 0",
+					},
+				},
+			},
+		},
 		"job with successPolicy and podFailurePolicy; job has a failed condition when job meets to both successPolicy and podFailurePolicy": {
 			enableJobSuccessPolicy: true,
 			enableJobFailurePolicy: true,
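
Note appended after the patch, not part of the diff: the new case asserts that once the single rule (SucceededIndexes "0,1", SucceededCount 1) is satisfied by the succeeded pod at index 1, the controller reports both SuccessCriteriaMet and Complete even though the pods with uids "a2" and "c" are still running, which is the behavior the TODO and issue #123775 flag for revisiting before beta. The standalone Go sketch below only illustrates that rule-matching semantics; the helper name ruleMatches and the plain map-of-indexes representation are invented for the illustration and are not the job controller's actual implementation.

// rule_match_sketch.go: standalone illustration only, not controller code.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// ruleMatches (hypothetical helper) reports whether a success-policy rule,
// given as a succeededIndexes expression such as "0,1" or "0-2" plus a
// required succeededCount, is satisfied by the set of indexes whose pods
// have already succeeded.
func ruleMatches(succeededIndexes string, succeededCount int, succeeded map[int]bool) bool {
	matched := 0
	for _, part := range strings.Split(succeededIndexes, ",") {
		bounds := strings.SplitN(part, "-", 2)
		lo, _ := strconv.Atoi(bounds[0])
		hi := lo
		if len(bounds) == 2 {
			hi, _ = strconv.Atoi(bounds[1])
		}
		for i := lo; i <= hi; i++ {
			if succeeded[i] {
				matched++
			}
		}
	}
	return matched >= succeededCount
}

func main() {
	// Mirrors the new test case: index 0 failed, index 1 succeeded, and the
	// pods at indexes 1 and 2 are still running. The rule requires only one
	// success within indexes "0,1", so it is already met.
	succeeded := map[int]bool{1: true}
	fmt.Println(ruleMatches("0,1", 1, succeeded)) // true -> SuccessCriteriaMet, then Complete
}

Assuming a standard checkout with the JobSuccessPolicy feature available, the new case should be runnable from the repository root with: go test ./pkg/controller/job/ -run TestSyncJobWithJobSuccessPolicy -v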