diff --git a/pkg/controller/job/job_controller_test.go b/pkg/controller/job/job_controller_test.go
index 4a4ac7bb7cf1..84311ebd077e 100644
--- a/pkg/controller/job/job_controller_test.go
+++ b/pkg/controller/job/job_controller_test.go
@@ -1422,7 +1422,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 			wantSucceededPodsMetric: 3,
 			wantFailedPodsMetric:    3,
 		},
-		"succeeding job": {
+		"succeeding job by JobSuccessPolicy": {
 			pods: []*v1.Pod{
 				buildPod().uid("a").phase(v1.PodSucceeded).trackingFinalizer().Pod,
 				buildPod().uid("b").phase(v1.PodFailed).trackingFinalizer().Pod,
@@ -3834,6 +3834,70 @@ func TestSyncJobWithJobSuccessPolicy(t *testing.T) {
 				},
 			},
 		},
+		// In the current mechanism, the job controller adds the Complete condition
+		// to the Job even if some pods are still running.
+		// So, we need to revisit this before graduating JobSuccessPolicy to beta.
+		// TODO(#123775): A Job might finish with ready!=0
+		// REF: https://github.com/kubernetes/kubernetes/issues/123775
+		"job with successPolicy; job has SuccessCriteriaMet and Complete conditions when the job meets the successPolicy and some pods are still running": {
+			enableJobSuccessPolicy: true,
+			job: batch.Job{
+				TypeMeta:   validTypeMeta,
+				ObjectMeta: validObjectMeta,
+				Spec: batch.JobSpec{
+					Selector:             validSelector,
+					Template:             validTemplate,
+					CompletionMode:       completionModePtr(batch.IndexedCompletion),
+					Parallelism:          ptr.To[int32](3),
+					Completions:          ptr.To[int32](3),
+					BackoffLimit:         ptr.To[int32](math.MaxInt32),
+					BackoffLimitPerIndex: ptr.To[int32](3),
+					SuccessPolicy: &batch.SuccessPolicy{
+						Rules: []batch.SuccessPolicyRule{{
+							SucceededIndexes: ptr.To("0,1"),
+							SucceededCount:   ptr.To[int32](1),
+						}},
+					},
+				},
+				Status: batch.JobStatus{
+					Conditions: []batch.JobCondition{
+						{
+							Type:    batch.JobSuccessCriteriaMet,
+							Status:  v1.ConditionTrue,
+							Reason:  batch.JobReasonSuccessPolicy,
+							Message: "Matched rules at index 0",
+						},
+					},
+				},
+			},
+			pods: []v1.Pod{
+				*buildPod().uid("a1").index("0").phase(v1.PodFailed).trackingFinalizer().Pod,
+				*buildPod().uid("a2").index("1").phase(v1.PodRunning).trackingFinalizer().Pod,
+				*buildPod().uid("b").index("1").phase(v1.PodSucceeded).trackingFinalizer().Pod,
+				*buildPod().uid("c").index("2").phase(v1.PodRunning).trackingFinalizer().Pod,
+			},
+			wantStatus: batch.JobStatus{
+				Failed:                  1,
+				Succeeded:               1,
+				Terminating:             ptr.To[int32](0),
+				CompletedIndexes:        "1",
+				UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
+				Conditions: []batch.JobCondition{
+					{
+						Type:    batch.JobSuccessCriteriaMet,
+						Status:  v1.ConditionTrue,
+						Reason:  batch.JobReasonSuccessPolicy,
+						Message: "Matched rules at index 0",
+					},
+					{
+						Type:    batch.JobComplete,
+						Status:  v1.ConditionTrue,
+						Reason:  batch.JobReasonSuccessPolicy,
+						Message: "Matched rules at index 0",
+					},
+				},
+			},
+		},
 		"job with successPolicy and podFailurePolicy; job has a failed condition when job meets to both successPolicy and podFailurePolicy": {
 			enableJobSuccessPolicy: true,
 			enableJobFailurePolicy: true,
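
For context on why the fixture above expects SuccessCriteriaMet: a JobSuccessPolicy rule that sets both succeededIndexes and succeededCount matches once at least succeededCount of the listed indexes have a succeeded pod. The sketch below is a minimal, self-contained illustration of that semantic, not the controller's actual implementation; ruleMatches is a hypothetical helper, and the interval syntax that real succeededIndexes values support (e.g. "0-2") is omitted for brevity.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// ruleMatches reports whether a rule with the given succeededIndexes list
// (comma-separated single indexes only) and succeededCount is satisfied by
// the set of indexes whose pods have succeeded. Hypothetical helper; not
// part of the Kubernetes codebase.
func ruleMatches(succeededIndexes string, succeededCount int, succeeded map[int]bool) bool {
	matched := 0
	for _, s := range strings.Split(succeededIndexes, ",") {
		ix, err := strconv.Atoi(strings.TrimSpace(s))
		if err != nil {
			continue
		}
		if succeeded[ix] {
			matched++
		}
	}
	return matched >= succeededCount
}

func main() {
	// Mirrors the test case: only index 1 has a succeeded pod; index 0
	// failed and index 2 is still running.
	succeeded := map[int]bool{1: true}
	// Rule 0: succeededIndexes "0,1", succeededCount 1 -> one listed index
	// has succeeded, so the rule matches ("Matched rules at index 0").
	fmt.Println(ruleMatches("0,1", 1, succeeded)) // true
}

Evaluated against the test's pod set, the rule matches as soon as index 1 succeeds, which is why the fixture expects SuccessCriteriaMet and, under the current mechanism the TODO flags, Complete as well, even though indexes 1 and 2 still have running pods.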