diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 03bc26a97645..b0e7236fdc50 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -4295,6 +4295,10 @@ "format": "int32", "type": "integer" }, + "managedBy": { + "description": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 64 characters.\n\nThis field is alpha-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (disabled by default).", + "type": "string" + }, "manualSelector": { "description": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", "type": "boolean" @@ -4344,7 +4348,7 @@ "description": "JobStatus represents the current state of a Job.", "properties": { "active": { - "description": "The number of pending and running pods which are not terminating (without a deletionTimestamp).", + "description": "The number of pending and running pods which are not terminating (without a deletionTimestamp). The value is zero for finished jobs.", "format": "int32", "type": "integer" }, @@ -4354,10 +4358,10 @@ }, "completionTime": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", - "description": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully." + "description": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is set when the job finishes successfully, and only then. The value cannot be updated or removed. The value indicates the same or later point in time as the startTime field." }, "conditions": { - "description": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "description": "The latest available observations of an object's current state. 
When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true.\n\nA job is considered finished when it is in a terminal condition, either \"Complete\" or \"Failed\". At that point, all pods of the job are in terminal phase. Job cannot be both in the \"Complete\" and \"Failed\" conditions. Additionally, it cannot be in the \"Complete\" and \"FailureTarget\" conditions. The \"Complete\", \"Failed\" and \"FailureTarget\" conditions cannot be disabled.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "items": { "$ref": "#/definitions/io.k8s.api.batch.v1.JobCondition" }, @@ -4367,36 +4371,36 @@ "x-kubernetes-patch-strategy": "merge" }, "failed": { - "description": "The number of pods which reached phase Failed.", + "description": "The number of pods which reached phase Failed. The value increases monotonically.", "format": "int32", "type": "integer" }, "failedIndexes": { - "description": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "description": "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.\n\nThis field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "type": "string" }, "ready": { - "description": "The number of pods which have a Ready condition.", + "description": "The number of pods which have a Ready condition. The value is zero (or null) for finished jobs.", "format": "int32", "type": "integer" }, "startTime": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", - "description": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC." + "description": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. 
It is represented in RFC3339 form and is in UTC.\n\nOnce set, the field can only be removed when the job is suspended. The field cannot be modified while the job is unsuspended or finished." }, "succeeded": { - "description": "The number of pods which reached phase Succeeded.", + "description": "The number of pods which reached phase Succeeded. The value increases monotonically for a given spec. However, it may decrease in reaction to scale down of elastic indexed jobs.", "format": "int32", "type": "integer" }, "terminating": { - "description": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).", + "description": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp). The value is zero (or null) for finished jobs.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).", "format": "int32", "type": "integer" }, "uncountedTerminatedPods": { "$ref": "#/definitions/io.k8s.api.batch.v1.UncountedTerminatedPods", - "description": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null." + "description": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null. The structure is empty for finished jobs." } }, "type": "object" diff --git a/api/openapi-spec/v3/apis__batch__v1_openapi.json b/api/openapi-spec/v3/apis__batch__v1_openapi.json index d1bdf4ee33fa..56f703633bba 100644 --- a/api/openapi-spec/v3/apis__batch__v1_openapi.json +++ b/api/openapi-spec/v3/apis__batch__v1_openapi.json @@ -344,6 +344,10 @@ "format": "int32", "type": "integer" }, + "managedBy": { + "description": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 64 characters.\n\nThis field is alpha-level. 
The job controller accepts setting the field when the feature gate JobManagedBy is enabled (disabled by default).", + "type": "string" + }, "manualSelector": { "description": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", "type": "boolean" @@ -406,7 +410,7 @@ "description": "JobStatus represents the current state of a Job.", "properties": { "active": { - "description": "The number of pending and running pods which are not terminating (without a deletionTimestamp).", + "description": "The number of pending and running pods which are not terminating (without a deletionTimestamp). The value is zero for finished jobs.", "format": "int32", "type": "integer" }, @@ -420,10 +424,10 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "description": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully." + "description": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is set when the job finishes successfully, and only then. The value cannot be updated or removed. The value indicates the same or later point in time as the startTime field." }, "conditions": { - "description": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "description": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true.\n\nA job is considered finished when it is in a terminal condition, either \"Complete\" or \"Failed\". At that point, all pods of the job are in terminal phase. Job cannot be both in the \"Complete\" and \"Failed\" conditions. Additionally, it cannot be in the \"Complete\" and \"FailureTarget\" conditions. 
The \"Complete\", \"Failed\" and \"FailureTarget\" conditions cannot be disabled.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "items": { "allOf": [ { @@ -438,16 +442,16 @@ "x-kubernetes-patch-strategy": "merge" }, "failed": { - "description": "The number of pods which reached phase Failed.", + "description": "The number of pods which reached phase Failed. The value increases monotonically.", "format": "int32", "type": "integer" }, "failedIndexes": { - "description": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "description": "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.\n\nThis field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "type": "string" }, "ready": { - "description": "The number of pods which have a Ready condition.", + "description": "The number of pods which have a Ready condition. The value is zero (or null) for finished jobs.", "format": "int32", "type": "integer" }, @@ -457,15 +461,15 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "description": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC." + "description": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.\n\nOnce set, the field can only be removed when the job is suspended. The field cannot be modified while the job is unsuspended or finished." }, "succeeded": { - "description": "The number of pods which reached phase Succeeded.", + "description": "The number of pods which reached phase Succeeded. The value increases monotonically for a given spec. However, it may decrease in reaction to scale down of elastic indexed jobs.", "format": "int32", "type": "integer" }, "terminating": { - "description": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is beta-level. 
The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).", + "description": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp). The value is zero (or null) for finished jobs.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).", "format": "int32", "type": "integer" }, @@ -475,7 +479,7 @@ "$ref": "#/components/schemas/io.k8s.api.batch.v1.UncountedTerminatedPods" } ], - "description": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null." + "description": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null. The structure is empty for finished jobs." } }, "type": "object" diff --git a/pkg/apis/batch/fuzzer/fuzzer.go b/pkg/apis/batch/fuzzer/fuzzer.go index 832de7d2f665..dc18c157ac53 100644 --- a/pkg/apis/batch/fuzzer/fuzzer.go +++ b/pkg/apis/batch/fuzzer/fuzzer.go @@ -64,6 +64,9 @@ var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} { podReplacementPolicy = batch.Failed } j.PodReplacementPolicy = &podReplacementPolicy + if c.RandBool() { + c.Fuzz(j.ManagedBy) + } }, func(sj *batch.CronJobSpec, c fuzz.Continue) { c.FuzzNoCustom(sj) diff --git a/pkg/apis/batch/types.go b/pkg/apis/batch/types.go index 8ac1d5ab7fea..879d6f4f6a25 100644 --- a/pkg/apis/batch/types.go +++ b/pkg/apis/batch/types.go @@ -51,6 +51,9 @@ const ( // to the pod, which don't count towards the backoff limit, according to the // pod failure policy. When the annotation is absent zero is implied. JobIndexIgnoredFailureCountAnnotation = labelPrefix + "job-index-ignored-failure-count" + // JobControllerName reserved value for the managedBy field for the built-in + // Job controller. + JobControllerName = "kubernetes.io/job-controller" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -409,6 +412,20 @@ type JobSpec struct { // This is on by default. // +optional PodReplacementPolicy *PodReplacementPolicy + + // ManagedBy field indicates the controller that manages a Job. The k8s Job + // controller reconciles jobs which don't have this field at all or the field + // value is the reserved string `kubernetes.io/job-controller`, but skips + // reconciling Jobs with a custom value for this field. + // The value must be a valid domain-prefixed path (e.g. acme.io/foo) - + // all characters before the first "/" must be a valid subdomain as defined + // by RFC 1123. 
All characters trailing the first "/" must be valid HTTP Path + // characters as defined by RFC 3986. The value cannot exceed 64 characters. + // + // This field is alpha-level. The job controller accepts setting the field + // when the feature gate JobManagedBy is enabled (disabled by default). + // +optional + ManagedBy *string } // JobStatus represents the current state of a Job. @@ -420,6 +437,13 @@ type JobStatus struct { // status true; when the Job is resumed, the status of this condition will // become false. When a Job is completed, one of the conditions will have // type "Complete" and status true. + // + // A job is considered finished when it is in a terminal condition, either + // "Complete" or "Failed". At that point, all pods of the job are in terminal + // phase. Job cannot be both in the "Complete" and "Failed" conditions. + // Additionally, it cannot be in the "Complete" and "FailureTarget" conditions. + // The "Complete", "Failed" and "FailureTarget" conditions cannot be disabled. + // // +optional Conditions []JobCondition @@ -427,23 +451,31 @@ type JobStatus struct { // Job is created in the suspended state, this field is not set until the // first time it is resumed. This field is reset every time a Job is resumed // from suspension. It is represented in RFC3339 form and is in UTC. + // + // Once set, the field can only be removed when the job is suspended. + // The field cannot be modified while the job is unsuspended or finished. + // // +optional StartTime *metav1.Time // Represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. - // The completion time is only set when the job finishes successfully. + // The completion time is set when the job finishes successfully, and only then. + // The value cannot be updated or removed. The value indicates the same or + // later point in time as the startTime field. // +optional CompletionTime *metav1.Time // The number of pending and running pods which are not terminating (without // a deletionTimestamp). + // The value is zero for finished jobs. // +optional Active int32 // The number of pods which are terminating (in phase Pending or Running // and have a deletionTimestamp). + // The value is zero (or null) for finished jobs. // // This field is beta-level. The job controller populates the field when // the feature gate JobPodReplacementPolicy is enabled (enabled by default). @@ -451,14 +483,18 @@ type JobStatus struct { Terminating *int32 // The number of active pods which have a Ready condition. + // The value is zero (or null) for finished jobs. // +optional Ready *int32 // The number of pods which reached phase Succeeded. + // The value increases monotonically for a given spec. However, it may + // decrease in reaction to scale down of elastic indexed jobs. // +optional Succeeded int32 // The number of pods which reached phase Failed. + // The value increases monotonically. // +optional Failed int32 @@ -472,7 +508,7 @@ type JobStatus struct { // +optional CompletedIndexes string - // FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. + // FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. // The indexes are represented in the text format analogous as for the // `completedIndexes` field, ie. they are kept as decimal integers // separated by commas. The numbers are listed in increasing order. 
Three or @@ -480,6 +516,8 @@ type JobStatus struct { // last element of the series, separated by a hyphen. // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are // represented as "1,3-5,7". + // The set of failed indexes cannot overlap with the set of completed indexes. + // // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` // feature gate is enabled (enabled by default). // +optional @@ -499,6 +537,7 @@ type JobStatus struct { // // Old jobs might not be tracked using this field, in which case the field // remains null. + // The structure is empty for finished jobs. // +optional UncountedTerminatedPods *UncountedTerminatedPods } diff --git a/pkg/apis/batch/v1/zz_generated.conversion.go b/pkg/apis/batch/v1/zz_generated.conversion.go index 603b7e36975d..7c7cef8afdab 100644 --- a/pkg/apis/batch/v1/zz_generated.conversion.go +++ b/pkg/apis/batch/v1/zz_generated.conversion.go @@ -452,6 +452,7 @@ func autoConvert_v1_JobSpec_To_batch_JobSpec(in *v1.JobSpec, out *batch.JobSpec, out.CompletionMode = (*batch.CompletionMode)(unsafe.Pointer(in.CompletionMode)) out.Suspend = (*bool)(unsafe.Pointer(in.Suspend)) out.PodReplacementPolicy = (*batch.PodReplacementPolicy)(unsafe.Pointer(in.PodReplacementPolicy)) + out.ManagedBy = (*string)(unsafe.Pointer(in.ManagedBy)) return nil } @@ -472,6 +473,7 @@ func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *v1.JobSpec, out.CompletionMode = (*v1.CompletionMode)(unsafe.Pointer(in.CompletionMode)) out.Suspend = (*bool)(unsafe.Pointer(in.Suspend)) out.PodReplacementPolicy = (*v1.PodReplacementPolicy)(unsafe.Pointer(in.PodReplacementPolicy)) + out.ManagedBy = (*string)(unsafe.Pointer(in.ManagedBy)) return nil } diff --git a/pkg/apis/batch/validation/validation.go b/pkg/apis/batch/validation/validation.go index e2f2a80a7e18..9da342385b50 100644 --- a/pkg/apis/batch/validation/validation.go +++ b/pkg/apis/batch/validation/validation.go @@ -19,6 +19,7 @@ package validation import ( "fmt" "regexp" + "strconv" "strings" "time" @@ -36,6 +37,7 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) // maxParallelismForIndexJob is the maximum parallelism that an Indexed Job @@ -61,6 +63,9 @@ const ( // maximum number of patterns for a OnPodConditions requirement in pod failure policy maxPodFailurePolicyOnPodConditionsPatterns = 20 + + // maximum length of the value of the managedBy field + maxManagedByLength = 63 ) var ( @@ -206,6 +211,12 @@ func validateJobSpec(spec *batch.JobSpec, fldPath *field.Path, opts apivalidatio allErrs = append(allErrs, field.Required(fldPath.Child("backoffLimitPerIndex"), fmt.Sprintf("when maxFailedIndexes is specified"))) } } + if spec.ManagedBy != nil { + allErrs = append(allErrs, apimachineryvalidation.IsDomainPrefixedPath(fldPath.Child("managedBy"), *spec.ManagedBy)...) 
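+ // For illustration: "example.com/foo" satisfies the domain-prefixed-path check above, while a bare "foo" does not; the check below additionally caps the value at maxManagedByLength (63) characters.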
+ if len(*spec.ManagedBy) > maxManagedByLength { + allErrs = append(allErrs, field.TooLongMaxLength(fldPath.Child("managedBy"), *spec.ManagedBy, maxManagedByLength)) + } + } if spec.CompletionMode != nil { if *spec.CompletionMode != batch.NonIndexedCompletion && *spec.CompletionMode != batch.IndexedCompletion { allErrs = append(allErrs, field.NotSupported(fldPath.Child("completionMode"), spec.CompletionMode, []batch.CompletionMode{batch.NonIndexedCompletion, batch.IndexedCompletion})) @@ -390,8 +401,9 @@ func validatePodFailurePolicyRuleOnExitCodes(onExitCode *batch.PodFailurePolicyO } // validateJobStatus validates a JobStatus and returns an ErrorList with any errors. -func validateJobStatus(status *batch.JobStatus, fldPath *field.Path) field.ErrorList { +func validateJobStatus(job *batch.Job, fldPath *field.Path, opts JobStatusValidationOptions) field.ErrorList { allErrs := field.ErrorList{} + status := job.Status allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Active), fldPath.Child("active"))...) allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Succeeded), fldPath.Child("succeeded"))...) allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Failed), fldPath.Child("failed"))...) @@ -425,6 +437,91 @@ func validateJobStatus(status *batch.JobStatus, fldPath *field.Path) field.Error } } } + if opts.RejectCompleteJobWithFailedCondition { + if IsJobComplete(job) && IsJobFailed(job) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("conditions"), field.OmitValueType{}, "cannot set Complete=True and Failed=true conditions")) + } + } + if opts.RejectCompleteJobWithFailureTargetCondition { + if IsJobComplete(job) && IsConditionTrue(status.Conditions, batch.JobFailureTarget) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("conditions"), field.OmitValueType{}, "cannot set Complete=True and FailureTarget=true conditions")) + } + } + if opts.RejectNotCompleteJobWithCompletionTime { + if status.CompletionTime != nil && !IsJobComplete(job) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("completionTime"), status.CompletionTime, "cannot set completionTime when there is no Complete=True condition")) + } + } + if opts.RejectCompleteJobWithoutCompletionTime { + if status.CompletionTime == nil && IsJobComplete(job) { + allErrs = append(allErrs, field.Required(fldPath.Child("completionTime"), "completionTime is required for Complete jobs")) + } + } + if opts.RejectCompletionTimeBeforeStartTime { + if status.StartTime != nil && status.CompletionTime != nil && status.CompletionTime.Before(status.StartTime) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("completionTime"), status.CompletionTime, "completionTime cannot be set before startTime")) + } + } + isJobFinished := IsJobFinished(job) + if opts.RejectFinishedJobWithActivePods { + if status.Active > 0 && isJobFinished { + allErrs = append(allErrs, field.Invalid(fldPath.Child("active"), status.Active, "active>0 is invalid for finished job")) + } + } + if opts.RejectFinishedJobWithTerminatingPods { + if status.Terminating != nil && *status.Terminating > 0 && isJobFinished { + allErrs = append(allErrs, field.Invalid(fldPath.Child("terminating"), status.Terminating, "terminating>0 is invalid for finished job")) + } + } + if opts.RejectFinishedJobWithoutStartTime { + if status.StartTime == nil && isJobFinished { + allErrs = append(allErrs, field.Required(fldPath.Child("startTime"), "startTime is required for finished job")) + } + } + if 
opts.RejectFinishedJobWithUncountedTerminatedPods { + if isJobFinished && status.UncountedTerminatedPods != nil && len(status.UncountedTerminatedPods.Failed)+len(status.UncountedTerminatedPods.Succeeded) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("uncountedTerminatedPods"), status.UncountedTerminatedPods, "uncountedTerminatedPods needs to be empty for finished job")) + } + } + if opts.RejectInvalidCompletedIndexes { + if job.Spec.Completions != nil { + if err := validateIndexesFormat(status.CompletedIndexes, int32(*job.Spec.Completions)); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("completedIndexes"), status.CompletedIndexes, fmt.Sprintf("error parsing completedIndexes: %s", err.Error()))) + } + } + } + if opts.RejectInvalidFailedIndexes { + if job.Spec.Completions != nil && job.Spec.BackoffLimitPerIndex != nil && status.FailedIndexes != nil { + if err := validateIndexesFormat(*status.FailedIndexes, int32(*job.Spec.Completions)); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("failedIndexes"), status.FailedIndexes, fmt.Sprintf("error parsing failedIndexes: %s", err.Error()))) + } + } + } + isIndexed := ptr.Deref(job.Spec.CompletionMode, batch.NonIndexedCompletion) == batch.IndexedCompletion + if opts.RejectCompletedIndexesForNonIndexedJob { + if len(status.CompletedIndexes) != 0 && !isIndexed { + allErrs = append(allErrs, field.Invalid(fldPath.Child("completedIndexes"), status.CompletedIndexes, "cannot set non-empty completedIndexes when non-indexed completion mode")) + } + } + if opts.RejectFailedIndexesForNoBackoffLimitPerIndex { + // Note that this check also verifies that FailedIndexes are not used for + // regular (non-indexed) jobs, because regular jobs have backoffLimitPerIndex = nil. + if job.Spec.BackoffLimitPerIndex == nil && status.FailedIndexes != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("failedIndexes"), *status.FailedIndexes, "cannot set non-null failedIndexes when backoffLimitPerIndex is null")) + } + } + if opts.RejectMoreReadyThanActivePods { + if status.Ready != nil && *status.Ready > status.Active { + allErrs = append(allErrs, field.Invalid(fldPath.Child("ready"), *status.Ready, "cannot set more ready pods than active")) + } + } + if opts.RejectFailedIndexesOverlappingCompleted { + if job.Spec.Completions != nil && status.FailedIndexes != nil { + if err := validateFailedIndexesNotOverlapCompleted(status.CompletedIndexes, *status.FailedIndexes, int32(*job.Spec.Completions)); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("failedIndexes"), *status.FailedIndexes, err.Error())) + } + } + } return allErrs } @@ -436,9 +533,9 @@ func ValidateJobUpdate(job, oldJob *batch.Job, opts JobValidationOptions) field. } // ValidateJobUpdateStatus validates an update to the status of a Job and returns an ErrorList with any errors. -func ValidateJobUpdateStatus(job, oldJob *batch.Job) field.ErrorList { +func ValidateJobUpdateStatus(job, oldJob *batch.Job, opts JobStatusValidationOptions) field.ErrorList { allErrs := apivalidation.ValidateObjectMetaUpdate(&job.ObjectMeta, &oldJob.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateJobStatusUpdate(job.Status, oldJob.Status)...) + allErrs = append(allErrs, ValidateJobStatusUpdate(job, oldJob, opts)...) 
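+ // NOTE: the opts passed here are expected to be derived from the enabled feature gates by the callers (updated outside this section), so each Reject* check can be rolled out gradually.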
return allErrs } @@ -452,6 +549,7 @@ func ValidateJobSpecUpdate(spec, oldSpec batch.JobSpec, fldPath *field.Path, opt allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.CompletionMode, oldSpec.CompletionMode, fldPath.Child("completionMode"))...) allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.PodFailurePolicy, oldSpec.PodFailurePolicy, fldPath.Child("podFailurePolicy"))...) allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.BackoffLimitPerIndex, oldSpec.BackoffLimitPerIndex, fldPath.Child("backoffLimitPerIndex"))...) + allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.ManagedBy, oldSpec.ManagedBy, fldPath.Child("managedBy"))...) return allErrs } @@ -486,9 +584,43 @@ func validatePodTemplateUpdate(spec, oldSpec batch.JobSpec, fldPath *field.Path, } // ValidateJobStatusUpdate validates an update to a JobStatus and returns an ErrorList with any errors. -func ValidateJobStatusUpdate(status, oldStatus batch.JobStatus) field.ErrorList { +func ValidateJobStatusUpdate(job, oldJob *batch.Job, opts JobStatusValidationOptions) field.ErrorList { allErrs := field.ErrorList{} - allErrs = append(allErrs, validateJobStatus(&status, field.NewPath("status"))...) + statusFld := field.NewPath("status") + allErrs = append(allErrs, validateJobStatus(job, statusFld, opts)...) + + if opts.RejectDisablingTerminalCondition { + for _, cType := range []batch.JobConditionType{batch.JobFailed, batch.JobComplete, batch.JobFailureTarget} { + if IsConditionTrue(oldJob.Status.Conditions, cType) && !IsConditionTrue(job.Status.Conditions, cType) { + allErrs = append(allErrs, field.Invalid(statusFld.Child("conditions"), field.OmitValueType{}, fmt.Sprintf("cannot disable the terminal %s=True condition", string(cType)))) + } + } + } + if opts.RejectDecreasingFailedCounter { + if job.Status.Failed < oldJob.Status.Failed { + allErrs = append(allErrs, field.Invalid(statusFld.Child("failed"), job.Status.Failed, "cannot decrease the failed counter")) + } + } + if opts.RejectDecreasingSucceededCounter { + if job.Status.Succeeded < oldJob.Status.Succeeded { + allErrs = append(allErrs, field.Invalid(statusFld.Child("succeeded"), job.Status.Succeeded, "cannot decrease the succeeded counter")) + } + } + if opts.RejectMutatingCompletionTime { + // Note that we check the condition only when `job.Status.CompletionTime != nil`, this is because + // we don't want to block transitions to completionTime = nil when the job is not finished yet. + // Setting completionTime = nil for finished jobs is prevented in RejectCompleteJobWithoutCompletionTime. + if job.Status.CompletionTime != nil && oldJob.Status.CompletionTime != nil && !ptr.Equal(job.Status.CompletionTime, oldJob.Status.CompletionTime) { + allErrs = append(allErrs, field.Invalid(statusFld.Child("completionTime"), job.Status.CompletionTime, "completionTime cannot be mutated")) + } + } + if opts.RejectStartTimeUpdateForUnsuspendedJob { + // Note that we check `oldJob.Status.StartTime != nil` to allow transitioning from + // startTime = nil to startTime != nil for unsuspended jobs, which is a desired transition. 
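+ // For example (illustrative): clearing startTime while spec.suspend is true is allowed, matching the API doc that the field can only be removed when the job is suspended; clearing or changing a set startTime on an unsuspended job is rejected below.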
+ if oldJob.Status.StartTime != nil && !ptr.Equal(oldJob.Status.StartTime, job.Status.StartTime) && !ptr.Deref(job.Spec.Suspend, false) { + allErrs = append(allErrs, field.Required(statusFld.Child("startTime"), "startTime cannot be removed for unsuspended job")) + } + } return allErrs } @@ -666,6 +798,124 @@ func validateCompletions(spec, oldSpec batch.JobSpec, fldPath *field.Path, opts return allErrs } +func IsJobFinished(job *batch.Job) bool { + for _, c := range job.Status.Conditions { + if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == api.ConditionTrue { + return true + } + } + return false +} + +func IsJobComplete(job *batch.Job) bool { + return IsConditionTrue(job.Status.Conditions, batch.JobComplete) +} + +func IsJobFailed(job *batch.Job) bool { + return IsConditionTrue(job.Status.Conditions, batch.JobFailed) +} + +func IsConditionTrue(list []batch.JobCondition, cType batch.JobConditionType) bool { + for _, c := range list { + if c.Type == cType && c.Status == api.ConditionTrue { + return true + } + } + return false +} + +func validateFailedIndexesNotOverlapCompleted(completedIndexesStr string, failedIndexesStr string, completions int32) error { + if len(completedIndexesStr) == 0 || len(failedIndexesStr) == 0 { + return nil + } + completedIndexesIntervals := strings.Split(completedIndexesStr, ",") + failedIndexesIntervals := strings.Split(failedIndexesStr, ",") + var completedPos, failedPos int + cX, cY, cErr := parseIndexInterval(completedIndexesIntervals[completedPos], completions) + fX, fY, fErr := parseIndexInterval(failedIndexesIntervals[failedPos], completions) + for completedPos < len(completedIndexesIntervals) && failedPos < len(failedIndexesIntervals) { + if cErr != nil { + // Failure to parse "completed" interval. We go to the next interval, + // the error will be reported to the user when validating the format. + completedPos++ + if completedPos < len(completedIndexesIntervals) { + cX, cY, cErr = parseIndexInterval(completedIndexesIntervals[completedPos], completions) + } + } else if fErr != nil { + // Failure to parse "failed" interval. We go to the next interval, + // the error will be reported to the user when validating the format. + failedPos++ + if failedPos < len(failedIndexesIntervals) { + fX, fY, fErr = parseIndexInterval(failedIndexesIntervals[failedPos], completions) + } + } else { + // We have one failed and one completed interval parsed. + if cX <= fY && fX <= cY { + return fmt.Errorf("failedIndexes and completedIndexes overlap at index: %d", max(cX, fX)) + } + // No overlap, let's move to the next one. 
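+ // Advance whichever interval starts first. For example (illustrative), with completed "0,2-4" and failed "1,9-10" the sweep compares [0,0] vs [1,1], then [2,4] vs [1,1], then [2,4] vs [9,10], finding no overlap.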
+ if cX <= fX { + completedPos++ + if completedPos < len(completedIndexesIntervals) { + cX, cY, cErr = parseIndexInterval(completedIndexesIntervals[completedPos], completions) + } + } else { + failedPos++ + if failedPos < len(failedIndexesIntervals) { + fX, fY, fErr = parseIndexInterval(failedIndexesIntervals[failedPos], completions) + } + } + } + } + return nil +} + +func validateIndexesFormat(indexesStr string, completions int32) error { + if len(indexesStr) == 0 { + return nil + } + var lastIndex *int32 + for _, intervalStr := range strings.Split(indexesStr, ",") { + x, y, err := parseIndexInterval(intervalStr, completions) + if err != nil { + return err + } + if lastIndex != nil && *lastIndex >= x { + return fmt.Errorf("non-increasing order, previous: %d, current: %d", *lastIndex, x) + } + lastIndex = &y + } + return nil +} + +func parseIndexInterval(intervalStr string, completions int32) (int32, int32, error) { + limitsStr := strings.Split(intervalStr, "-") + if len(limitsStr) > 2 { + return 0, 0, fmt.Errorf("the fragment %q violates the requirement that an index interval can have at most two parts separated by '-'", intervalStr) + } + x, err := strconv.Atoi(limitsStr[0]) + if err != nil { + return 0, 0, fmt.Errorf("cannot convert string to integer for index: %q", limitsStr[0]) + } + if x >= int(completions) { + return 0, 0, fmt.Errorf("too large index: %q", limitsStr[0]) + } + if len(limitsStr) > 1 { + y, err := strconv.Atoi(limitsStr[1]) + if err != nil { + return 0, 0, fmt.Errorf("cannot convert string to integer for index: %q", limitsStr[1]) + } + if y >= int(completions) { + return 0, 0, fmt.Errorf("too large index: %q", limitsStr[1]) + } + if x >= y { + return 0, 0, fmt.Errorf("non-increasing order, previous: %d, current: %d", x, y) + } + return int32(x), int32(y), nil + } + return int32(x), int32(x), nil +} + type JobValidationOptions struct { apivalidation.PodValidationOptions // Allow mutable node affinity, selector and tolerations of the template @@ -675,3 +925,26 @@ type JobValidationOptions struct { // Require Job to have the label on batch.kubernetes.io/job-name and batch.kubernetes.io/controller-uid RequirePrefixedLabels bool } + +type JobStatusValidationOptions struct { + RejectDecreasingSucceededCounter bool + RejectDecreasingFailedCounter bool + RejectDisablingTerminalCondition bool + RejectInvalidCompletedIndexes bool + RejectInvalidFailedIndexes bool + RejectFailedIndexesOverlappingCompleted bool + RejectCompletedIndexesForNonIndexedJob bool + RejectFailedIndexesForNoBackoffLimitPerIndex bool + RejectMoreReadyThanActivePods bool + RejectFinishedJobWithActivePods bool + RejectFinishedJobWithTerminatingPods bool + RejectFinishedJobWithoutStartTime bool + RejectFinishedJobWithUncountedTerminatedPods bool + RejectStartTimeUpdateForUnsuspendedJob bool + RejectCompletionTimeBeforeStartTime bool + RejectMutatingCompletionTime bool + RejectCompleteJobWithoutCompletionTime bool + RejectNotCompleteJobWithCompletionTime bool + RejectCompleteJobWithFailedCondition bool + RejectCompleteJobWithFailureTargetCondition bool +} diff --git a/pkg/apis/batch/validation/validation_test.go b/pkg/apis/batch/validation/validation_test.go index 59537a1f9f6d..ea09d9643849 100644 --- a/pkg/apis/batch/validation/validation_test.go +++ b/pkg/apis/batch/validation/validation_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package validation import ( + "errors" _ "time/tzdata" "fmt" @@ -33,6 +34,7 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" corevalidation "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var ( @@ -380,6 +382,17 @@ func TestValidateJob(t *testing.T) { }, }, }, + "valid managedBy field": { + opts: JobValidationOptions{RequirePrefixedLabels: true}, + job: batch.Job{ + ObjectMeta: validJobObjectMeta, + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + ManagedBy: ptr.To("example.com/foo"), + }, + }, + }, } for k, v := range successCases { t.Run(k, func(t *testing.T) { @@ -394,6 +407,28 @@ func TestValidateJob(t *testing.T) { opts JobValidationOptions job batch.Job }{ + `spec.managedBy: Too long: may not be longer than 63`: { + opts: JobValidationOptions{RequirePrefixedLabels: true}, + job: batch.Job{ + ObjectMeta: validJobObjectMeta, + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + ManagedBy: ptr.To("example.com/" + strings.Repeat("x", 60)), + }, + }, + }, + `spec.managedBy: Invalid value: "invalid custom controller name": must be a domain-prefixed path (such as "acme.io/foo")`: { + opts: JobValidationOptions{RequirePrefixedLabels: true}, + job: batch.Job{ + ObjectMeta: validJobObjectMeta, + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + ManagedBy: ptr.To("invalid custom controller name"), + }, + }, + }, `spec.podFailurePolicy.rules[0]: Invalid value: specifying one of OnExitCodes and OnPodConditions is required`: { job: batch.Job{ ObjectMeta: validJobObjectMeta, @@ -1349,6 +1384,39 @@ func TestValidateJobUpdate(t *testing.T) { job.Spec.ManualSelector = pointer.Bool(true) }, }, + "invalid attempt to set managedBy field": { + old: batch.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + }, + }, + update: func(job *batch.Job) { + job.Spec.ManagedBy = ptr.To("example.com/custom-controller") + }, + err: &field.Error{ + Type: field.ErrorTypeInvalid, + Field: "spec.managedBy", + }, + }, + "invalid update of the managedBy field": { + old: batch.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + ManagedBy: ptr.To("example.com/custom-controller1"), + }, + }, + update: func(job *batch.Job) { + job.Spec.ManagedBy = ptr.To("example.com/custom-controller2") + }, + err: &field.Error{ + Type: field.ErrorTypeInvalid, + Field: "spec.managedBy", + }, + }, "immutable completions for non-indexed jobs": { old: batch.Job{ ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, @@ -2014,6 +2082,8 @@ func TestValidateJobUpdate(t *testing.T) { func TestValidateJobUpdateStatus(t *testing.T) { cases := map[string]struct { + opts JobStatusValidationOptions + old batch.Job update batch.Job wantErrs field.ErrorList @@ -2141,7 +2211,7 @@ func TestValidateJobUpdateStatus(t *testing.T) { } for name, tc := range cases { t.Run(name, func(t *testing.T) { - errs := ValidateJobUpdateStatus(&tc.update, &tc.old) + errs := ValidateJobUpdateStatus(&tc.update, &tc.old, tc.opts) if diff := cmp.Diff(tc.wantErrs, errs, ignoreErrValueDetail); diff != "" { t.Errorf("Unexpected errors (-want,+got):\n%s", diff) } @@ -3587,3 
+3657,161 @@ func TestTimeZones(t *testing.T) { } } } + +func TestValidateIndexesString(t *testing.T) { + testCases := map[string]struct { + indexesString string + completions int32 + wantError error + }{ + "empty is valid": { + indexesString: "", + completions: 6, + }, + "single number is valid": { + indexesString: "1", + completions: 6, + }, + "single interval is valid": { + indexesString: "1-3", + completions: 6, + }, + "mixed intervals valid": { + indexesString: "0,1-3,5,7-10", + completions: 12, + }, + "invalid due to extra space": { + indexesString: "0,1-3, 5", + completions: 6, + wantError: errors.New(`cannot convert string to integer for index: " 5"`), + }, + "invalid due to too large index": { + indexesString: "0,1-3,5", + completions: 5, + wantError: errors.New(`too large index: "5"`), + }, + "invalid due to non-increasing order of intervals": { + indexesString: "1-3,0,5", + completions: 6, + wantError: errors.New(`non-increasing order, previous: 3, current: 0`), + }, + "invalid due to non-increasing order between intervals": { + indexesString: "0,0,5", + completions: 6, + wantError: errors.New(`non-increasing order, previous: 0, current: 0`), + }, + "invalid due to non-increasing order within interval": { + indexesString: "0,1-1,5", + completions: 6, + wantError: errors.New(`non-increasing order, previous: 1, current: 1`), + }, + "invalid due to starting with '-'": { + indexesString: "-1,0", + completions: 6, + wantError: errors.New(`cannot convert string to integer for index: ""`), + }, + "invalid due to ending with '-'": { + indexesString: "0,1-", + completions: 6, + wantError: errors.New(`cannot convert string to integer for index: ""`), + }, + "invalid due to repeated '-'": { + indexesString: "0,1--3", + completions: 6, + wantError: errors.New(`the fragment "1--3" violates the requirement that an index interval can have at most two parts separated by '-'`), + }, + "invalid due to repeated ','": { + indexesString: "0,,1,3", + completions: 6, + wantError: errors.New(`cannot convert string to integer for index: ""`), + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + gotErr := validateIndexesFormat(tc.indexesString, tc.completions) + if tc.wantError == nil && gotErr != nil { + t.Errorf("unexpected error: %s", gotErr) + } else if tc.wantError != nil && gotErr == nil { + t.Errorf("missing error: %s", tc.wantError) + } else if tc.wantError != nil && gotErr != nil { + if diff := cmp.Diff(tc.wantError.Error(), gotErr.Error()); diff != "" { + t.Errorf("unexpected error, diff: %s", diff) + } + } + }) + } +} + +func TestValidateFailedIndexesNotOverlapCompleted(t *testing.T) { + testCases := map[string]struct { + completedIndexesStr string + failedIndexesStr string + completions int32 + wantError error + }{ + "empty intervals": { + completedIndexesStr: "", + failedIndexesStr: "", + completions: 6, + }, + "empty completed intervals": { + completedIndexesStr: "", + failedIndexesStr: "1-3", + completions: 6, + }, + "empty failed intervals": { + completedIndexesStr: "1-2", + failedIndexesStr: "", + completions: 6, + }, + "non-overlapping intervals": { + completedIndexesStr: "0,2-4,6-8,12-19", + failedIndexesStr: "1,9-10", + completions: 20, + }, + "overlapping intervals": { + completedIndexesStr: "0,2-4,6-8,12-19", + failedIndexesStr: "1,8,9-10", + completions: 20, + wantError: errors.New("failedIndexes and completedIndexes overlap at index: 8"), + }, + "overlapping intervals, corrupted completed interval skipped": { + completedIndexesStr: 
"0,2-4,x,6-8,12-19", + failedIndexesStr: "1,8,9-10", + completions: 20, + wantError: errors.New("failedIndexes and completedIndexes overlap at index: 8"), + }, + "overlapping intervals, corrupted failed interval skipped": { + completedIndexesStr: "0,2-4,6-8,12-19", + failedIndexesStr: "1,y,8,9-10", + completions: 20, + wantError: errors.New("failedIndexes and completedIndexes overlap at index: 8"), + }, + "overlapping intervals, first corrupted intervals skipped": { + completedIndexesStr: "x,0,2-4,6-8,12-19", + failedIndexesStr: "y,1,8,9-10", + completions: 20, + wantError: errors.New("failedIndexes and completedIndexes overlap at index: 8"), + }, + "non-overlapping intervals, last intervals corrupted": { + completedIndexesStr: "0,2-4,6-8,12-19,x", + failedIndexesStr: "1,9-10,y", + completions: 20, + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + gotErr := validateFailedIndexesNotOverlapCompleted(tc.completedIndexesStr, tc.failedIndexesStr, tc.completions) + if tc.wantError == nil && gotErr != nil { + t.Errorf("unexpected error: %s", gotErr) + } else if tc.wantError != nil && gotErr == nil { + t.Errorf("missing error: %s", tc.wantError) + } else if tc.wantError != nil && gotErr != nil { + if diff := cmp.Diff(tc.wantError.Error(), gotErr.Error()); diff != "" { + t.Errorf("unexpected error, diff: %s", diff) + } + } + }) + } +} diff --git a/pkg/apis/batch/zz_generated.deepcopy.go b/pkg/apis/batch/zz_generated.deepcopy.go index f34516f7b4ac..e29334dadedd 100644 --- a/pkg/apis/batch/zz_generated.deepcopy.go +++ b/pkg/apis/batch/zz_generated.deepcopy.go @@ -308,6 +308,11 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) { *out = new(PodReplacementPolicy) **out = **in } + if in.ManagedBy != nil { + in, out := &in.ManagedBy, &out.ManagedBy + *out = new(string) + **out = **in + } return } diff --git a/pkg/controller/job/job_controller.go b/pkg/controller/job/job_controller.go index ec4793dc644b..52747150ca30 100644 --- a/pkg/controller/job/job_controller.go +++ b/pkg/controller/job/job_controller.go @@ -169,7 +169,7 @@ func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodIn if _, err := jobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - jm.enqueueSyncJobImmediately(logger, obj) + jm.addJob(logger, obj) }, UpdateFunc: func(oldObj, newObj interface{}) { jm.updateJob(logger, oldObj, newObj) @@ -448,6 +448,17 @@ func (jm *Controller) deletePod(logger klog.Logger, obj interface{}, final bool) jm.enqueueSyncJobBatched(logger, job) } +func (jm *Controller) addJob(logger klog.Logger, obj interface{}) { + jm.enqueueSyncJobImmediately(logger, obj) + jobObj, ok := obj.(*batch.Job) + if !ok { + return + } + if controllerName := managedByExternalController(jobObj); controllerName != nil { + metrics.JobByExternalControllerTotal.WithLabelValues(*controllerName).Inc() + } +} + func (jm *Controller) updateJob(logger klog.Logger, old, cur interface{}) { oldJob := old.(*batch.Job) curJob := cur.(*batch.Job) @@ -545,6 +556,7 @@ func (jm *Controller) enqueueSyncJobInternal(logger klog.Logger, obj interface{} utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err)) return } + // TODO: Handle overlapping controllers better. Either disallow them at admission time or // deterministically avoid syncing controllers that fight over pods. Currently, we only // ensure that the same controller is synced for a given pod. 
When we periodically relist @@ -636,6 +648,13 @@ func (jm *Controller) syncOrphanPod(ctx context.Context, key string) error { // Make sure the pod is still orphaned. if controllerRef := metav1.GetControllerOf(sharedPod); controllerRef != nil { job := jm.resolveControllerRef(sharedPod.Namespace, controllerRef) + if job != nil { + // Skip cleanup of finalizers for pods owned by a job managed by an external controller + if controllerName := managedByExternalController(job); controllerName != nil { + logger.V(2).Info("Skip cleanup of the job finalizer for a pod owned by a job that is managed by an external controller", "key", key, "podUID", sharedPod.UID, "jobUID", job.UID, "controllerName", controllerName) + return nil + } + } if job != nil && !IsJobFinished(job) { // The pod was adopted. Do not remove finalizer. return nil } @@ -732,6 +751,17 @@ func (jm *Controller) syncJob(ctx context.Context, key string) (rErr error) { } return err } + + // Skip syncing of the job if it is managed by another controller. + // We cannot rely solely on skipping of queueing such jobs for synchronization, + // because it is possible a synchronization task is queued for a job, without + // the managedBy field, but the job is quickly replaced by another job with + // the field. Then, the syncJob might be invoked for a job with the field. + if controllerName := managedByExternalController(sharedJob); controllerName != nil { + logger.V(2).Info("Skip syncing the job as it is managed by an external controller", "key", key, "uid", sharedJob.UID, "controllerName", controllerName) + return nil + } + // make a copy so we don't mutate the shared cache job := *sharedJob.DeepCopy() @@ -1934,3 +1964,12 @@ func recordJobPodsCreationTotal(job *batch.Job, jobCtx *syncJobCtx, succeeded, f metrics.JobPodsCreationTotal.WithLabelValues(reason, metrics.Failed).Add(float64(failed)) } } + +func managedByExternalController(jobObj *batch.Job) *string { + if feature.DefaultFeatureGate.Enabled(features.JobManagedBy) { + if controllerName := jobObj.Spec.ManagedBy; controllerName != nil && *controllerName != batch.JobControllerName { + return controllerName + } + } + return nil +} diff --git a/pkg/controller/job/job_controller_test.go b/pkg/controller/job/job_controller_test.go index 7f33daa9b62a..e0d2bc69086f 100644 --- a/pkg/controller/job/job_controller_test.go +++ b/pkg/controller/job/job_controller_test.go @@ -2292,6 +2292,126 @@ func TestSyncJobDeleted(t *testing.T) { } } +func TestSyncJobWhenManagedBy(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + now := metav1.Now() + baseJob := batch.Job{ + TypeMeta: metav1.TypeMeta{Kind: "Job"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "foobar", + Namespace: metav1.NamespaceDefault, + }, + Spec: batch.JobSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + {Image: "foo/bar"}, + }, + }, + }, + Parallelism: ptr.To[int32](2), + Completions: ptr.To[int32](2), + BackoffLimit: ptr.To[int32](6), + }, + Status: batch.JobStatus{ + Active: 1, + Ready: ptr.To[int32](1), + StartTime: &now, + }, + } + + testCases := map[string]struct { + enableJobManagedBy bool + job batch.Job + wantStatus batch.JobStatus + }{ + "job with custom value of managedBy; feature enabled; the status is unchanged": { + enableJobManagedBy: true, + job: func() batch.Job { + job := baseJob.DeepCopy() + job.Spec.ManagedBy = ptr.To("custom-managed-by") + return *job + }(), + wantStatus: baseJob.Status, + },
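+ // The case above captures the core contract of JobManagedBy: with the gate enabled and a custom managedBy value, syncJob leaves the status exactly as the external controller wrote it.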
"job with well known value of the managedBy; feature enabled; the status is updated": { + enableJobManagedBy: true, + job: func() batch.Job { + job := baseJob.DeepCopy() + job.Spec.ManagedBy = ptr.To(batch.JobControllerName) + return *job + }(), + wantStatus: batch.JobStatus{ + Active: 2, + Ready: ptr.To[int32](0), + StartTime: &now, + Terminating: ptr.To[int32](0), + UncountedTerminatedPods: &batch.UncountedTerminatedPods{}, + }, + }, + "job with custom value of managedBy; feature disabled; the status is updated": { + job: func() batch.Job { + job := baseJob.DeepCopy() + job.Spec.ManagedBy = ptr.To("custom-managed-by") + return *job + }(), + wantStatus: batch.JobStatus{ + Active: 2, + Ready: ptr.To[int32](0), + StartTime: &now, + Terminating: ptr.To[int32](0), + UncountedTerminatedPods: &batch.UncountedTerminatedPods{}, + }, + }, + "job without the managedBy; feature enabled; the status is updated": { + enableJobManagedBy: true, + job: baseJob, + wantStatus: batch.JobStatus{ + Active: 2, + Ready: ptr.To[int32](0), + StartTime: &now, + Terminating: ptr.To[int32](0), + UncountedTerminatedPods: &batch.UncountedTerminatedPods{}, + }, + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobManagedBy, tc.enableJobManagedBy)() + + clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) + manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc) + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + manager.podStoreSynced = alwaysReady + manager.jobStoreSynced = alwaysReady + job := &tc.job + + actual := job + manager.updateStatusHandler = func(_ context.Context, job *batch.Job) (*batch.Job, error) { + actual = job + return job, nil + } + if err := sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job); err != nil { + t.Fatalf("error %v while adding the %v job to the index", err, klog.KObj(job)) + } + + if err := manager.syncJob(ctx, testutil.GetKey(job, t)); err != nil { + t.Fatalf("error %v while reconciling the job %v", err, testutil.GetKey(job, t)) + } + + if diff := cmp.Diff(tc.wantStatus, actual.Status); diff != "" { + t.Errorf("Unexpected job status (-want,+got):\n%s", diff) + } + }) + } +} + func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { _, ctx := ktesting.NewTestContext(t) now := metav1.Now() diff --git a/pkg/controller/job/metrics/metrics.go b/pkg/controller/job/metrics/metrics.go index 39a82f53f9b9..5fe5b760a905 100644 --- a/pkg/controller/job/metrics/metrics.go +++ b/pkg/controller/job/metrics/metrics.go @@ -71,6 +71,20 @@ var ( []string{"completion_mode", "result", "reason"}, ) + // JobByExternalControllerTotal tracks the number of Jobs that were created + // as managed by an external controller. + // The value of the label controller_name corresponds to the value of the + // managedBy field. + JobByExternalControllerTotal = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: JobControllerSubsystem, + Name: "jobs_by_external_controller_total", + Help: "The number of Jobs managed by an external controller", + StabilityLevel: metrics.ALPHA, + }, + []string{"controller_name"}, + ) + // JobPodsFinished records the number of finished Pods that the job controller // finished tracking. 
// It only applies to Jobs that were created while the feature gate @@ -195,5 +209,6 @@ func Register() { legacyregistry.MustRegister(TerminatedPodsTrackingFinalizerTotal) legacyregistry.MustRegister(JobFinishedIndexesTotal) legacyregistry.MustRegister(JobPodsCreationTotal) + legacyregistry.MustRegister(JobByExternalControllerTotal) }) } diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 865e7903ac20..c22193519c3e 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -349,6 +349,13 @@ const ( // Allows users to specify counting of failed pods per index. JobBackoffLimitPerIndex featuregate.Feature = "JobBackoffLimitPerIndex" + // owner: @mimowo + // kep: https://kep.k8s.io/4368 + // alpha: v1.30 + // + // Allows delegating reconciliation of a Job object to an external controller. + JobManagedBy featuregate.Feature = "JobManagedBy" + // owner: @mimowo // kep: https://kep.k8s.io/3329 // alpha: v1.25 @@ -1048,6 +1055,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS JobBackoffLimitPerIndex: {Default: true, PreRelease: featuregate.Beta}, + JobManagedBy: {Default: false, PreRelease: featuregate.Alpha}, + JobPodFailurePolicy: {Default: true, PreRelease: featuregate.Beta}, JobPodReplacementPolicy: {Default: true, PreRelease: featuregate.Beta}, diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index f299a8a13258..9fe6fa748912 100644 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -16235,6 +16235,13 @@ func schema_k8sio_api_batch_v1_JobSpec(ref common.ReferenceCallback) common.Open Enum: []interface{}{"Failed", "TerminatingOrFailed"}, }, }, + "managedBy": { + SchemaProps: spec.SchemaProps{ + Description: "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 64 characters.\n\nThis field is alpha-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (disabled by default).", + Type: []string{"string"}, + Format: "", + }, + }, }, Required: []string{"template"}, }, @@ -16260,7 +16267,7 @@ func schema_k8sio_api_batch_v1_JobStatus(ref common.ReferenceCallback) common.Op }, }, SchemaProps: spec.SchemaProps{ - Description: "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. 
When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true.\n\nA job is considered finished when it is in a terminal condition, either \"Complete\" or \"Failed\". At that point, all pods of the job are in terminal phase. Job cannot be both in the \"Complete\" and \"Failed\" conditions. Additionally, it cannot be in the \"Complete\" and \"FailureTarget\" conditions. The \"Complete\", \"Failed\" and \"FailureTarget\" conditions cannot be disabled.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -16274,40 +16281,40 @@ func schema_k8sio_api_batch_v1_JobStatus(ref common.ReferenceCallback) common.Op }, "startTime": { SchemaProps: spec.SchemaProps{ - Description: "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.", + Description: "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.\n\nOnce set, the field can only be removed when the job is suspended. The field cannot be modified while the job is unsuspended or finished.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "completionTime": { SchemaProps: spec.SchemaProps{ - Description: "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully.", + Description: "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is set when the job finishes successfully, and only then. The value cannot be updated or removed. The value indicates the same or later point in time as the startTime field.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "active": { SchemaProps: spec.SchemaProps{ - Description: "The number of pending and running pods which are not terminating (without a deletionTimestamp).", + Description: "The number of pending and running pods which are not terminating (without a deletionTimestamp). The value is zero for finished jobs.", Type: []string{"integer"}, Format: "int32", }, }, "succeeded": { SchemaProps: spec.SchemaProps{ - Description: "The number of pods which reached phase Succeeded.", + Description: "The number of pods which reached phase Succeeded. The value increases monotonically for a given spec. However, it may decrease in reaction to scale down of elastic indexed jobs.", Type: []string{"integer"}, Format: "int32", }, }, "failed": { SchemaProps: spec.SchemaProps{ - Description: "The number of pods which reached phase Failed.", + Description: "The number of pods which reached phase Failed. 
The value increases monotonically.", Type: []string{"integer"}, Format: "int32", }, }, "terminating": { SchemaProps: spec.SchemaProps{ - Description: "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).", + Description: "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp). The value is zero (or null) for finished jobs.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).", Type: []string{"integer"}, Format: "int32", }, @@ -16321,20 +16328,20 @@ func schema_k8sio_api_batch_v1_JobStatus(ref common.ReferenceCallback) common.Op }, "failedIndexes": { SchemaProps: spec.SchemaProps{ - Description: "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + Description: "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.\n\nThis field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", Type: []string{"string"}, Format: "", }, }, "uncountedTerminatedPods": { SchemaProps: spec.SchemaProps{ - Description: "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", + Description: "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null. 
The structure is empty for finished jobs.", Ref: ref("k8s.io/api/batch/v1.UncountedTerminatedPods"), }, }, "ready": { SchemaProps: spec.SchemaProps{ - Description: "The number of pods which have a Ready condition.", + Description: "The number of pods which have a Ready condition. The value is zero (or null) for finished jobs.", Type: []string{"integer"}, Format: "int32", }, diff --git a/pkg/registry/batch/job/strategy.go b/pkg/registry/batch/job/strategy.go index cb4b2b3a5ffb..45d728c79779 100644 --- a/pkg/registry/batch/job/strategy.go +++ b/pkg/registry/batch/job/strategy.go @@ -44,6 +44,7 @@ import ( batchvalidation "k8s.io/kubernetes/pkg/apis/batch/validation" "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/features" + "k8s.io/utils/ptr" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" ) @@ -100,6 +101,9 @@ func (jobStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) { if !utilfeature.DefaultFeatureGate.Enabled(features.JobPodFailurePolicy) { job.Spec.PodFailurePolicy = nil } + if !utilfeature.DefaultFeatureGate.Enabled(features.JobManagedBy) { + job.Spec.ManagedBy = nil + } if !utilfeature.DefaultFeatureGate.Enabled(features.JobBackoffLimitPerIndex) { job.Spec.BackoffLimitPerIndex = nil @@ -331,7 +335,77 @@ func (jobStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime. } func (jobStatusStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { - return batchvalidation.ValidateJobUpdateStatus(obj.(*batch.Job), old.(*batch.Job)) + newJob := obj.(*batch.Job) + oldJob := old.(*batch.Job) + + opts := getStatusValidationOptions(newJob, oldJob) + return batchvalidation.ValidateJobUpdateStatus(newJob, oldJob, opts) +} + +// getStatusValidationOptions returns validation options for Job status. +func getStatusValidationOptions(newJob, oldJob *batch.Job) batchvalidation.JobStatusValidationOptions { + if utilfeature.DefaultFeatureGate.Enabled(features.JobManagedBy) { + // A strengthened validation of the Job status transitions is needed since the + // Job managedBy field lets the Job object be controlled by external + // controllers. We want to make sure the transitions done by the external + // controllers meet the expectations of the clients of the Job API. + // For example, we verify that a Job in terminal state (Failed or Complete) + // does not flip to a non-terminal state. + // + // In the checks below we fail validation for Job status fields (or conditions) only if they change their values + // (compared to the oldJob). This allows proceeding with status updates unrelated to the fields violating the + // checks, while blocking bad status updates for jobs with correct status. + // + // Also note that there is another reason we run the validation rules only + // if the associated status fields changed: some of the validation rules + // might be temporarily violated just after a user updates the spec. + // In that case we want to give time to the Job + // controller to "fix" the status in the following sync. For example, the + // rule for checking the format of completedIndexes expects them to be + // below .spec.completions; however, it is ok if status.completedIndexes + // goes beyond completions just after a user scales down a Job. 
+ isIndexed := ptr.Deref(newJob.Spec.CompletionMode, batch.NonIndexedCompletion) == batch.IndexedCompletion + + isJobFinishedChanged := batchvalidation.IsJobFinished(oldJob) != batchvalidation.IsJobFinished(newJob) + isJobCompleteChanged := batchvalidation.IsJobComplete(oldJob) != batchvalidation.IsJobComplete(newJob) + isJobFailedChanged := batchvalidation.IsJobFailed(oldJob) != batchvalidation.IsJobFailed(newJob) + isJobFailureTargetChanged := batchvalidation.IsConditionTrue(oldJob.Status.Conditions, batch.JobFailureTarget) != batchvalidation.IsConditionTrue(newJob.Status.Conditions, batch.JobFailureTarget) + isCompletedIndexesChanged := oldJob.Status.CompletedIndexes != newJob.Status.CompletedIndexes + isFailedIndexesChanged := !ptr.Equal(oldJob.Status.FailedIndexes, newJob.Status.FailedIndexes) + isActiveChanged := oldJob.Status.Active != newJob.Status.Active + isReadyChanged := !ptr.Equal(oldJob.Status.Ready, newJob.Status.Ready) + isTerminatingChanged := !ptr.Equal(oldJob.Status.Terminating, newJob.Status.Terminating) + isStartTimeChanged := !ptr.Equal(oldJob.Status.StartTime, newJob.Status.StartTime) + isCompletionTimeChanged := !ptr.Equal(oldJob.Status.CompletionTime, newJob.Status.CompletionTime) + isUncountedTerminatedPodsChanged := !apiequality.Semantic.DeepEqual(oldJob.Status.UncountedTerminatedPods, newJob.Status.UncountedTerminatedPods) + + return batchvalidation.JobStatusValidationOptions{ + // We allow decreasing the counter for succeeded pods for indexed jobs which + // have equal parallelism and completions, as they can be scaled down. + RejectDecreasingSucceededCounter: !isIndexed || !ptr.Equal(newJob.Spec.Completions, newJob.Spec.Parallelism), + RejectDecreasingFailedCounter: true, + RejectDisablingTerminalCondition: true, + RejectInvalidCompletedIndexes: isCompletedIndexesChanged, + RejectInvalidFailedIndexes: isFailedIndexesChanged, + RejectCompletedIndexesForNonIndexedJob: isCompletedIndexesChanged, + RejectFailedIndexesForNoBackoffLimitPerIndex: isFailedIndexesChanged, + RejectFailedIndexesOverlappingCompleted: isFailedIndexesChanged || isCompletedIndexesChanged, + RejectMoreReadyThanActivePods: isReadyChanged || isActiveChanged, + RejectFinishedJobWithActivePods: isJobFinishedChanged || isActiveChanged, + RejectFinishedJobWithTerminatingPods: isJobFinishedChanged || isTerminatingChanged, + RejectFinishedJobWithoutStartTime: isJobFinishedChanged || isStartTimeChanged, + RejectFinishedJobWithUncountedTerminatedPods: isJobFinishedChanged || isUncountedTerminatedPodsChanged, + RejectStartTimeUpdateForUnsuspendedJob: isStartTimeChanged, + RejectCompletionTimeBeforeStartTime: isStartTimeChanged || isCompletionTimeChanged, + RejectMutatingCompletionTime: true, + RejectNotCompleteJobWithCompletionTime: isJobCompleteChanged || isCompletionTimeChanged, + RejectCompleteJobWithoutCompletionTime: isJobCompleteChanged || isCompletionTimeChanged, + RejectCompleteJobWithFailedCondition: isJobCompleteChanged || isJobFailedChanged, + RejectCompleteJobWithFailureTargetCondition: isJobCompleteChanged || isJobFailureTargetChanged, + } + } + return batchvalidation.JobStatusValidationOptions{} } // WarningsOnUpdate returns warnings for the given update. 
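For orientation, the options above are easiest to read from the perspective of an external controller that owns a Job via spec.managedBy and must keep the Job's status transitions consistent for API clients. Below is a minimal, illustrative client-go sketch (not part of this change) of a status update that satisfies the strengthened rules when marking a managed Job as finished successfully: startTime is present, completionTime is no earlier than startTime, the active/ready/terminating counters are zeroed, uncountedTerminatedPods is emptied, and exactly one terminal condition is added. The helper name markJobComplete and the Reason/Message strings are hypothetical.

// Illustrative sketch only; assumes the JobManagedBy feature gate is enabled
// and that all pods of the Job have already reached a terminal phase.
package external

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/utils/ptr"
)

func markJobComplete(ctx context.Context, c kubernetes.Interface, job *batchv1.Job) error {
	job = job.DeepCopy() // never mutate objects coming from an informer cache
	now := metav1.Now()
	if job.Status.StartTime == nil {
		// A finished Job must have startTime set (RejectFinishedJobWithoutStartTime).
		job.Status.StartTime = &now
	}
	// completionTime must not precede startTime (RejectCompletionTimeBeforeStartTime)
	// and, once set, can never change (RejectMutatingCompletionTime).
	job.Status.CompletionTime = &now
	// A finished Job cannot report running, ready, or terminating pods.
	job.Status.Active = 0
	job.Status.Ready = ptr.To[int32](0)
	job.Status.Terminating = ptr.To[int32](0)
	job.Status.UncountedTerminatedPods = &batchv1.UncountedTerminatedPods{}
	// Exactly one terminal condition: adding Failed or FailureTarget alongside
	// Complete would be rejected.
	job.Status.Conditions = append(job.Status.Conditions, batchv1.JobCondition{
		Type:               batchv1.JobComplete,
		Status:             corev1.ConditionTrue,
		LastProbeTime:      now,
		LastTransitionTime: now,
		Reason:             "CompletionsReached", // hypothetical reason string
		Message:            "all pods succeeded",
	})
	_, err := c.BatchV1().Jobs(job.Namespace).UpdateStatus(ctx, job, metav1.UpdateOptions{})
	return err
}

Note that under these options a later update that removed the Complete condition or decreased the failed counter would now fail validation, even though the same write was accepted before this change.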
diff --git a/pkg/registry/batch/job/strategy_test.go b/pkg/registry/batch/job/strategy_test.go index 4c1058d9e200..90ea4ce5b2ea 100644 --- a/pkg/registry/batch/job/strategy_test.go +++ b/pkg/registry/batch/job/strategy_test.go @@ -18,6 +18,7 @@ package job import ( "testing" + "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -35,6 +36,7 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/features" "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var ignoreErrValueDetail = cmpopts.IgnoreFields(field.Error{}, "BadValue", "Detail") @@ -480,6 +482,7 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { enableJobPodFailurePolicy bool enableJobBackoffLimitPerIndex bool enableJobPodReplacementPolicy bool + enableJobManagedBy bool job batch.Job wantJob batch.Job }{ @@ -751,6 +754,47 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { }, }, }, + "managedBy field is dropped when the feature gate is disabled": { + enableJobManagedBy: false, + job: batch.Job{ + ObjectMeta: getValidObjectMeta(0), + Spec: batch.JobSpec{ + Selector: validSelector, + ManualSelector: pointer.Bool(false), + Template: validPodTemplateSpec, + ManagedBy: ptr.To("custom-controller-name"), + }, + }, + wantJob: batch.Job{ + ObjectMeta: getValidObjectMeta(1), + Spec: batch.JobSpec{ + Selector: validSelector, + ManualSelector: pointer.Bool(false), + Template: expectedPodTemplateSpec, + }, + }, + }, + "managedBy field is set when the feature gate is enabled": { + enableJobManagedBy: true, + job: batch.Job{ + ObjectMeta: getValidObjectMeta(0), + Spec: batch.JobSpec{ + Selector: validSelector, + ManualSelector: pointer.Bool(false), + Template: validPodTemplateSpec, + ManagedBy: ptr.To("custom-controller-name"), + }, + }, + wantJob: batch.Job{ + ObjectMeta: getValidObjectMeta(1), + Spec: batch.JobSpec{ + Selector: validSelector, + ManualSelector: pointer.Bool(false), + Template: expectedPodTemplateSpec, + ManagedBy: ptr.To("custom-controller-name"), + }, + }, + }, } for name, tc := range cases { @@ -758,6 +802,7 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodFailurePolicy, tc.enableJobPodFailurePolicy)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobBackoffLimitPerIndex, tc.enableJobBackoffLimitPerIndex)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodReplacementPolicy, tc.enableJobPodReplacementPolicy)() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobManagedBy, tc.enableJobManagedBy)() ctx := genericapirequest.NewDefaultContext() Strategy.PrepareForCreate(ctx, &tc.job) @@ -1859,11 +1904,21 @@ func TestStatusStrategy_ValidateUpdate(t *testing.T) { Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: api.TerminationMessageReadFile}}, }, } + validObjectMeta := metav1.ObjectMeta{ + Name: "myjob", + Namespace: metav1.NamespaceDefault, + ResourceVersion: "10", + } + now := metav1.Now() + nowPlusMinute := metav1.Time{Time: now.Add(time.Minute)} cases := map[string]struct { + enableJobManagedBy bool + + job *batch.Job + newJob *batch.Job + wantJob *batch.Job - job *batch.Job - newJob *batch.Job - wantJob *batch.Job + wantErrs field.ErrorList }{ "incoming resource version on update should not be mutated": { job: &batch.Job{ @@ -1903,15 +1958,931 @@ func 
TestStatusStrategy_ValidateUpdate(t *testing.T) { }, }, }, - } - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - errs := StatusStrategy.ValidateUpdate(ctx, tc.newJob, tc.job) - if len(errs) != 0 { - t.Errorf("Unexpected error %v", errs) - } - if diff := cmp.Diff(tc.wantJob, tc.newJob); diff != "" { - t.Errorf("Unexpected job (-want,+got):\n%s", diff) + "invalid addition of both Failed=True and Complete=True; allowed because feature gate disabled": { + enableJobManagedBy: false, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + CompletionTime: &now, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + { + Type: batch.JobFailed, + Status: api.ConditionTrue, + }, + }, + }, + }, + }, + "invalid addition of both Failed=True and Complete=True": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + CompletionTime: &now, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + { + Type: batch.JobFailed, + Status: api.ConditionTrue, + }, + }, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.conditions"}, + }, + }, + "completionTime can be removed to fix still running job": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + CompletionTime: &now, + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + }, + }, + }, + "invalid attempt to transition to Failed=True without startTime": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + Conditions: []batch.JobCondition{ + { + Type: batch.JobFailed, + Status: api.ConditionTrue, + }, + }, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeRequired, Field: "status.startTime"}, + }, + }, + "invalid attempt to transition to Complete=True without startTime": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + CompletionTime: &now, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + }, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeRequired, Field: "status.startTime"}, + }, + }, + "invalid attempt to transition to Complete=True with active > 0": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + CompletionTime: &now, + Active: 1, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + }, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.active"}, + }, + }, + "invalid attempt to transition to Complete=True with terminating > 0": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + CompletionTime: &now, + Terminating: ptr.To[int32](1), + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + 
Status: api.ConditionTrue, + }, + }, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.terminating"}, + }, + }, + "invalid attempt to transition to Failed=True with uncountedTerminatedPods.Failed>0": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + UncountedTerminatedPods: &batch.UncountedTerminatedPods{ + Failed: []types.UID{"a"}, + }, + Conditions: []batch.JobCondition{ + { + Type: batch.JobFailed, + Status: api.ConditionTrue, + }, + }, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.uncountedTerminatedPods"}, + }, + }, + "invalid attempt to update uncountedTerminatedPods.Succeeded for Complete job": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + CompletionTime: &now, + UncountedTerminatedPods: &batch.UncountedTerminatedPods{ + Failed: []types.UID{"a"}, + }, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + }, + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + CompletionTime: &now, + UncountedTerminatedPods: &batch.UncountedTerminatedPods{ + Failed: []types.UID{"b"}, + }, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + }, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.uncountedTerminatedPods"}, + }, + }, + "non-empty uncountedTerminatedPods for complete job, unrelated update": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + CompletionTime: &now, + UncountedTerminatedPods: &batch.UncountedTerminatedPods{ + Failed: []types.UID{"a"}, + }, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + }, + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + CompletionTime: &now, + UncountedTerminatedPods: &batch.UncountedTerminatedPods{ + Failed: []types.UID{"a"}, + }, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + { + Type: batch.JobConditionType("CustomJobCondition"), + Status: api.ConditionTrue, + }, + }, + }, + }, + }, + "invalid attempt to transition to Complete=True with uncountedTerminatedPods.Succeeded>0": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + CompletionTime: &now, + UncountedTerminatedPods: &batch.UncountedTerminatedPods{ + Succeeded: []types.UID{"a"}, + }, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + }, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.uncountedTerminatedPods"}, + }, + }, + "invalid addition Complete=True without setting CompletionTime": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + }, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeRequired, Field: 
"status.completionTime"}, + }, + }, + "invalid attempt to remove completionTime": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + CompletionTime: &now, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + }, + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + CompletionTime: nil, + StartTime: &now, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + }, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeRequired, Field: "status.completionTime"}, + }, + }, + "verify startTime can be cleared for suspended job": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Suspend: ptr.To(true), + }, + Status: batch.JobStatus{ + StartTime: &now, + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Suspend: ptr.To(true), + }, + Status: batch.JobStatus{ + StartTime: nil, + }, + }, + }, + "verify startTime cannot be removed for unsuspended job": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: nil, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeRequired, Field: "status.startTime"}, + }, + }, + "verify startTime cannot be updated for unsuspended job": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &nowPlusMinute, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeRequired, Field: "status.startTime"}, + }, + }, + "invalid attempt to set completionTime before startTime": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &nowPlusMinute, + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &nowPlusMinute, + CompletionTime: &now, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + }, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.completionTime"}, + }, + }, + "invalid attempt to modify completionTime": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + CompletionTime: &now, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + }, + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + CompletionTime: &nowPlusMinute, + StartTime: &now, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + }, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.completionTime"}, + }, + }, + "invalid removal of terminal condition Failed=True": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + Conditions: []batch.JobCondition{ + { + Type: batch.JobFailed, + Status: api.ConditionTrue, + }, + }, + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.conditions"}, + }, + }, + 
"invalid removal of terminal condition Complete=True": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + }, + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.conditions"}, + }, + }, + "invalid removal of terminal condition FailureTarget=True": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + Conditions: []batch.JobCondition{ + { + Type: batch.JobFailureTarget, + Status: api.ConditionTrue, + }, + }, + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.conditions"}, + }, + }, + "invalid addition of FailureTarget=True when Complete=True": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + CompletionTime: &now, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + }, + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + CompletionTime: &now, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + { + Type: batch.JobFailureTarget, + Status: api.ConditionTrue, + }, + }, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.conditions"}, + }, + }, + "invalid attempt setting of CompletionTime when there is no Complete condition": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + CompletionTime: &now, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.completionTime"}, + }, + }, + "invalid CompletionTime when there is no Complete condition, but allowed": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + CompletionTime: &now, + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + CompletionTime: &now, + Active: 1, + }, + }, + }, + "invalid attempt setting CompletedIndexes when non-indexed completion mode is used": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.NonIndexedCompletion), + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.NonIndexedCompletion), + }, + Status: batch.JobStatus{ + StartTime: &now, + CompletedIndexes: "0", + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.completedIndexes"}, + }, + }, + "invalid because CompletedIndexes set when non-indexed completion mode is used; but allowed": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.NonIndexedCompletion), + }, + Status: batch.JobStatus{ + CompletedIndexes: "0", + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: 
completionModePtr(batch.NonIndexedCompletion), + }, + Status: batch.JobStatus{ + CompletedIndexes: "0", + Active: 1, + }, + }, + }, + "invalid attempt setting FailedIndexes when not backoffLimitPerIndex": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.IndexedCompletion), + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.IndexedCompletion), + }, + Status: batch.JobStatus{ + FailedIndexes: ptr.To("0"), + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.failedIndexes"}, + }, + }, + "invalid attempt to decrease the failed counter": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + }, + Status: batch.JobStatus{ + Failed: 3, + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + }, + Status: batch.JobStatus{ + Failed: 1, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.failed"}, + }, + }, + "invalid attempt to decrease the succeeded counter": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + }, + Status: batch.JobStatus{ + Succeeded: 3, + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + }, + Status: batch.JobStatus{ + Succeeded: 1, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.succeeded"}, + }, + }, + "invalid attempt to set bad format for CompletedIndexes": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.IndexedCompletion), + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.IndexedCompletion), + }, + Status: batch.JobStatus{ + CompletedIndexes: "invalid format", + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.completedIndexes"}, + }, + }, + "invalid format for CompletedIndexes, but allowed": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.IndexedCompletion), + }, + Status: batch.JobStatus{ + CompletedIndexes: "invalid format", + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.IndexedCompletion), + }, + Status: batch.JobStatus{ + CompletedIndexes: "invalid format", + Active: 1, + }, + }, + }, + "invalid attempt to set bad format for FailedIndexes": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.IndexedCompletion), + BackoffLimitPerIndex: pointer.Int32(1), + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.IndexedCompletion), + BackoffLimitPerIndex: pointer.Int32(1), + }, + Status: 
batch.JobStatus{ + FailedIndexes: ptr.To("invalid format"), + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.failedIndexes"}, + }, + }, + "invalid format for FailedIndexes, but allowed": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.IndexedCompletion), + BackoffLimitPerIndex: pointer.Int32(1), + }, + Status: batch.JobStatus{ + FailedIndexes: ptr.To("invalid format"), + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.IndexedCompletion), + BackoffLimitPerIndex: pointer.Int32(1), + }, + Status: batch.JobStatus{ + FailedIndexes: ptr.To("invalid format"), + Active: 1, + }, + }, + }, + "invalid attempt to set more ready pods than active": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + }, + Status: batch.JobStatus{ + Active: 1, + Ready: ptr.To[int32](2), + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.ready"}, + }, + }, + "more ready pods than active, but allowed": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + }, + Status: batch.JobStatus{ + Active: 1, + Ready: ptr.To[int32](2), + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + }, + Status: batch.JobStatus{ + Active: 1, + Ready: ptr.To[int32](2), + Succeeded: 1, + }, + }, + }, + "invalid addition of both FailureTarget=True and Complete=True": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Status: batch.JobStatus{ + StartTime: &now, + CompletionTime: &now, + Conditions: []batch.JobCondition{ + { + Type: batch.JobComplete, + Status: api.ConditionTrue, + }, + { + Type: batch.JobFailureTarget, + Status: api.ConditionTrue, + }, + }, + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.conditions"}, + }, + }, + "invalid failedIndexes, which overlap with completedIndexes": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.IndexedCompletion), + }, + Status: batch.JobStatus{ + FailedIndexes: ptr.To("0,2"), + CompletedIndexes: "3-4", + }, + }, + newJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.IndexedCompletion), + }, + Status: batch.JobStatus{ + FailedIndexes: ptr.To("0,2"), + CompletedIndexes: "2-4", + }, + }, + wantErrs: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "status.failedIndexes"}, + }, + }, + "failedIndexes overlap with completedIndexes, unrelated field change": { + enableJobManagedBy: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.IndexedCompletion), + }, + Status: batch.JobStatus{ + FailedIndexes: ptr.To("0,2"), + CompletedIndexes: "2-4", + }, + }, + newJob: &batch.Job{ + ObjectMeta: 
validObjectMeta, + Spec: batch.JobSpec{ + Completions: ptr.To[int32](5), + CompletionMode: completionModePtr(batch.IndexedCompletion), + }, + Status: batch.JobStatus{ + FailedIndexes: ptr.To("0,2"), + CompletedIndexes: "2-4", + Active: 1, + }, + }, + }, + } + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobManagedBy, tc.enableJobManagedBy)() + errs := StatusStrategy.ValidateUpdate(ctx, tc.newJob, tc.job) + if diff := cmp.Diff(tc.wantErrs, errs, ignoreErrValueDetail); diff != "" { + t.Errorf("Unexpected errors (-want,+got):\n%s", diff) + } + if tc.wantJob != nil { + if diff := cmp.Diff(tc.wantJob, tc.newJob); diff != "" { + t.Errorf("Unexpected job (-want,+got):\n%s", diff) + } } }) } diff --git a/staging/src/k8s.io/api/batch/v1/generated.pb.go b/staging/src/k8s.io/api/batch/v1/generated.pb.go index 0f010b7a1075..989c9a0e71af 100644 --- a/staging/src/k8s.io/api/batch/v1/generated.pb.go +++ b/staging/src/k8s.io/api/batch/v1/generated.pb.go @@ -495,119 +495,120 @@ func init() { } var fileDescriptor_79228dc2c4001a22 = []byte{ - // 1783 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6f, 0x24, 0x47, - 0x15, 0xf7, 0xd8, 0x1e, 0x7b, 0xa6, 0xc6, 0x1f, 0xb3, 0xb5, 0xde, 0xdd, 0xc1, 0x44, 0xd3, 0xce, - 0x6c, 0x12, 0x39, 0x28, 0xf4, 0x64, 0x9d, 0x15, 0xe1, 0x43, 0xa0, 0x6c, 0x7b, 0xd9, 0xb0, 0x66, - 0x9c, 0x1d, 0x6a, 0xbc, 0x20, 0x85, 0x80, 0xa8, 0xe9, 0xae, 0x19, 0x77, 0xb6, 0xa7, 0xab, 0xe9, - 0xaa, 0xb6, 0xd6, 0x17, 0x84, 0xc4, 0x1f, 0x00, 0x7f, 0x05, 0x47, 0x2e, 0x70, 0x86, 0x1b, 0xf2, - 0x31, 0xe2, 0x14, 0x71, 0x68, 0xb1, 0xcd, 0x1f, 0xc0, 0xdd, 0x08, 0x09, 0x55, 0x75, 0x4d, 0x7f, - 0x4d, 0xb7, 0xf1, 0x46, 0x62, 0x95, 0x9b, 0xfb, 0xbd, 0xdf, 0xfb, 0xd5, 0xc7, 0x7b, 0xf5, 0x7b, - 0xcf, 0x03, 0xee, 0x3e, 0xfb, 0x26, 0xd3, 0x6d, 0xda, 0xc7, 0x9e, 0xdd, 0x1f, 0x63, 0x6e, 0x9e, - 0xf6, 0xcf, 0xee, 0xf5, 0xa7, 0xc4, 0x25, 0x3e, 0xe6, 0xc4, 0xd2, 0x3d, 0x9f, 0x72, 0x0a, 0x6f, - 0xc6, 0x20, 0x1d, 0x7b, 0xb6, 0x2e, 0x41, 0xfa, 0xd9, 0xbd, 0xdd, 0xaf, 0x4f, 0x6d, 0x7e, 0x1a, - 0x8c, 0x75, 0x93, 0xce, 0xfa, 0x53, 0x3a, 0xa5, 0x7d, 0x89, 0x1d, 0x07, 0x13, 0xf9, 0x25, 0x3f, - 0xe4, 0x5f, 0x31, 0xc7, 0x6e, 0x2f, 0xb3, 0x90, 0x49, 0x7d, 0x52, 0xb2, 0xce, 0xee, 0xfd, 0x14, - 0x33, 0xc3, 0xe6, 0xa9, 0xed, 0x12, 0xff, 0xbc, 0xef, 0x3d, 0x9b, 0x0a, 0x03, 0xeb, 0xcf, 0x08, - 0xc7, 0x65, 0x51, 0xfd, 0xaa, 0x28, 0x3f, 0x70, 0xb9, 0x3d, 0x23, 0x0b, 0x01, 0xdf, 0xf8, 0x5f, - 0x01, 0xcc, 0x3c, 0x25, 0x33, 0x5c, 0x8c, 0xeb, 0xfd, 0xbb, 0x06, 0xd6, 0x0f, 0x7d, 0xea, 0x1e, - 0xd1, 0x31, 0xfc, 0x05, 0x68, 0x88, 0xfd, 0x58, 0x98, 0xe3, 0x4e, 0x6d, 0xaf, 0xb6, 0xdf, 0x3a, - 0x78, 0x57, 0x4f, 0x6f, 0x29, 0xa1, 0xd5, 0xbd, 0x67, 0x53, 0x61, 0x60, 0xba, 0x40, 0xeb, 0x67, - 0xf7, 0xf4, 0x27, 0xe3, 0x4f, 0x89, 0xc9, 0x8f, 0x09, 0xc7, 0x06, 0xbc, 0x08, 0xb5, 0xa5, 0x28, - 0xd4, 0x40, 0x6a, 0x43, 0x09, 0x2b, 0x34, 0xc0, 0x2a, 0xf3, 0x88, 0xd9, 0x59, 0x96, 0xec, 0x7b, - 0x7a, 0x49, 0x0e, 0x74, 0xb5, 0x9b, 0x91, 0x47, 0x4c, 0x63, 0x43, 0xb1, 0xad, 0x8a, 0x2f, 0x24, - 0x63, 0xe1, 0x11, 0x58, 0x63, 0x1c, 0xf3, 0x80, 0x75, 0x56, 0x24, 0x4b, 0xef, 0x4a, 0x16, 0x89, - 0x34, 0xb6, 0x14, 0xcf, 0x5a, 0xfc, 0x8d, 0x14, 0x43, 0xef, 0x0f, 0x35, 0xd0, 0x52, 0xc8, 0x81, - 0xcd, 0x38, 0xfc, 0x64, 0xe1, 0x06, 0xf4, 0xeb, 0xdd, 0x80, 0x88, 0x96, 0xe7, 0x6f, 0xab, 0x95, - 0x1a, 0x73, 0x4b, 0xe6, 0xf4, 0x0f, 0x40, 0xdd, 0xe6, 0x64, 0xc6, 0x3a, 0xcb, 0x7b, 0x2b, 0xfb, 
- 0xad, 0x83, 0xd7, 0xae, 0xda, 0xb8, 0xb1, 0xa9, 0x88, 0xea, 0x8f, 0x45, 0x08, 0x8a, 0x23, 0x7b, - 0x7f, 0x5b, 0x4d, 0x36, 0x2c, 0xae, 0x04, 0xbe, 0x03, 0x1a, 0x22, 0xb1, 0x56, 0xe0, 0x10, 0xb9, - 0xe1, 0x66, 0xba, 0x81, 0x91, 0xb2, 0xa3, 0x04, 0x01, 0xf7, 0x41, 0x43, 0xd4, 0xc2, 0xc7, 0xd4, - 0x25, 0x9d, 0x86, 0x44, 0x6f, 0x08, 0xe4, 0x89, 0xb2, 0xa1, 0xc4, 0x0b, 0x9f, 0x82, 0x3b, 0x8c, - 0x63, 0x9f, 0xdb, 0xee, 0xf4, 0x21, 0xc1, 0x96, 0x63, 0xbb, 0x64, 0x44, 0x4c, 0xea, 0x5a, 0x4c, - 0xe6, 0x6e, 0xc5, 0xf8, 0x6a, 0x14, 0x6a, 0x77, 0x46, 0xe5, 0x10, 0x54, 0x15, 0x0b, 0x3f, 0x01, - 0x37, 0x4c, 0xea, 0x9a, 0x81, 0xef, 0x13, 0xd7, 0x3c, 0x1f, 0x52, 0xc7, 0x36, 0xcf, 0x65, 0x1a, - 0x9b, 0x86, 0xae, 0xf6, 0x7d, 0xe3, 0xb0, 0x08, 0xb8, 0x2c, 0x33, 0xa2, 0x45, 0x22, 0xf8, 0x26, - 0x58, 0x67, 0x01, 0xf3, 0x88, 0x6b, 0x75, 0x56, 0xf7, 0x6a, 0xfb, 0x0d, 0xa3, 0x15, 0x85, 0xda, - 0xfa, 0x28, 0x36, 0xa1, 0xb9, 0x0f, 0xfe, 0x14, 0xb4, 0x3e, 0xa5, 0xe3, 0x13, 0x32, 0xf3, 0x1c, - 0xcc, 0x49, 0xa7, 0x2e, 0xf3, 0xfc, 0x46, 0x69, 0x32, 0x8e, 0x52, 0x9c, 0xac, 0xc7, 0x9b, 0x6a, - 0x93, 0xad, 0x8c, 0x03, 0x65, 0xd9, 0xe0, 0xcf, 0xc1, 0x2e, 0x0b, 0x4c, 0x93, 0x30, 0x36, 0x09, - 0x9c, 0x23, 0x3a, 0x66, 0x3f, 0xb0, 0x19, 0xa7, 0xfe, 0xf9, 0xc0, 0x9e, 0xd9, 0xbc, 0xb3, 0xb6, - 0x57, 0xdb, 0xaf, 0x1b, 0xdd, 0x28, 0xd4, 0x76, 0x47, 0x95, 0x28, 0x74, 0x05, 0x03, 0x44, 0xe0, - 0xf6, 0x04, 0xdb, 0x0e, 0xb1, 0x16, 0xb8, 0xd7, 0x25, 0xf7, 0x6e, 0x14, 0x6a, 0xb7, 0x1f, 0x95, - 0x22, 0x50, 0x45, 0x64, 0xef, 0xcf, 0xcb, 0x60, 0x33, 0xf7, 0x5e, 0xe0, 0x0f, 0xc1, 0x1a, 0x36, - 0xb9, 0x7d, 0x26, 0x8a, 0x4a, 0x94, 0xea, 0xdd, 0xec, 0xed, 0x08, 0xa5, 0x4b, 0x5f, 0x3d, 0x22, - 0x13, 0x22, 0x92, 0x40, 0xd2, 0x47, 0xf6, 0x40, 0x86, 0x22, 0x45, 0x01, 0x1d, 0xd0, 0x76, 0x30, - 0xe3, 0xf3, 0x7a, 0x14, 0xd5, 0x26, 0xf3, 0xd3, 0x3a, 0xf8, 0xda, 0xf5, 0x1e, 0x97, 0x88, 0x30, - 0x76, 0xa2, 0x50, 0x6b, 0x0f, 0x0a, 0x3c, 0x68, 0x81, 0x19, 0xfa, 0x00, 0x4a, 0x5b, 0x72, 0x85, - 0x72, 0xbd, 0xfa, 0x4b, 0xaf, 0x77, 0x3b, 0x0a, 0x35, 0x38, 0x58, 0x60, 0x42, 0x25, 0xec, 0xbd, - 0x7f, 0xd5, 0xc0, 0xca, 0xab, 0x11, 0xd0, 0xef, 0xe5, 0x04, 0xf4, 0xb5, 0xaa, 0xa2, 0xad, 0x14, - 0xcf, 0x47, 0x05, 0xf1, 0xec, 0x56, 0x32, 0x5c, 0x2d, 0x9c, 0x7f, 0x5d, 0x01, 0x1b, 0x47, 0x74, - 0x7c, 0x48, 0x5d, 0xcb, 0xe6, 0x36, 0x75, 0xe1, 0x7d, 0xb0, 0xca, 0xcf, 0xbd, 0xb9, 0x08, 0xed, - 0xcd, 0x97, 0x3e, 0x39, 0xf7, 0xc8, 0x65, 0xa8, 0xb5, 0xb3, 0x58, 0x61, 0x43, 0x12, 0x0d, 0x07, - 0xc9, 0x76, 0x96, 0x65, 0xdc, 0xfd, 0xfc, 0x72, 0x97, 0xa1, 0x56, 0xd2, 0x62, 0xf5, 0x84, 0x29, - 0xbf, 0x29, 0x38, 0x05, 0x9b, 0x22, 0x39, 0x43, 0x9f, 0x8e, 0xe3, 0x2a, 0x5b, 0x79, 0xe9, 0xac, - 0xdf, 0x52, 0x1b, 0xd8, 0x1c, 0x64, 0x89, 0x50, 0x9e, 0x17, 0x9e, 0xc5, 0x35, 0x76, 0xe2, 0x63, - 0x97, 0xc5, 0x47, 0xfa, 0x62, 0x35, 0xbd, 0xab, 0x56, 0x93, 0x75, 0x96, 0x67, 0x43, 0x25, 0x2b, - 0xc0, 0xb7, 0xc0, 0x9a, 0x4f, 0x30, 0xa3, 0xae, 0xac, 0xe7, 0x66, 0x9a, 0x1d, 0x24, 0xad, 0x48, - 0x79, 0xe1, 0xdb, 0x60, 0x7d, 0x46, 0x18, 0xc3, 0x53, 0x22, 0x15, 0xa7, 0x69, 0x6c, 0x2b, 0xe0, - 0xfa, 0x71, 0x6c, 0x46, 0x73, 0x7f, 0xef, 0xf7, 0x35, 0xb0, 0xfe, 0x6a, 0xba, 0xdf, 0x77, 0xf3, - 0xdd, 0xaf, 0x53, 0x55, 0x79, 0x15, 0x9d, 0xef, 0xb7, 0x0d, 0xb9, 0x51, 0xd9, 0xf5, 0xee, 0x81, - 0x96, 0x87, 0x7d, 0xec, 0x38, 0xc4, 0xb1, 0xd9, 0x4c, 0xee, 0xb5, 0x6e, 0x6c, 0x0b, 0x5d, 0x1e, - 0xa6, 0x66, 0x94, 0xc5, 0x88, 0x10, 0x93, 0xce, 0x3c, 0x87, 0x88, 0xcb, 0x8c, 0xcb, 0x4d, 0x85, - 0x1c, 0xa6, 0x66, 0x94, 0xc5, 0xc0, 0x27, 0xe0, 0x56, 0xac, 0x60, 0xc5, 0x0e, 0xb8, 0x22, 0x3b, - 0xe0, 0x57, 0xa2, 0x50, 
0xbb, 0xf5, 0xa0, 0x0c, 0x80, 0xca, 0xe3, 0xe0, 0x14, 0xb4, 0x3d, 0x6a, - 0x09, 0x71, 0x0e, 0x7c, 0xa2, 0x9a, 0x5f, 0x4b, 0xde, 0xf3, 0x9b, 0xa5, 0x97, 0x31, 0x2c, 0x80, - 0x63, 0x0d, 0x2c, 0x5a, 0xd1, 0x02, 0x29, 0xbc, 0x0f, 0x36, 0xc6, 0xd8, 0x7c, 0x46, 0x27, 0x93, - 0x6c, 0x6b, 0x68, 0x47, 0xa1, 0xb6, 0x61, 0x64, 0xec, 0x28, 0x87, 0x82, 0x03, 0xb0, 0x93, 0xfd, - 0x1e, 0x12, 0xff, 0xb1, 0x6b, 0x91, 0xe7, 0x9d, 0x0d, 0x19, 0xdd, 0x89, 0x42, 0x6d, 0xc7, 0x28, - 0xf1, 0xa3, 0xd2, 0x28, 0xf8, 0x01, 0x68, 0xcf, 0xf0, 0xf3, 0xb8, 0x13, 0x49, 0x0b, 0x61, 0x9d, - 0x4d, 0xc9, 0x24, 0x4f, 0x71, 0x5c, 0xf0, 0xa1, 0x05, 0x34, 0xfc, 0x19, 0x68, 0x30, 0xe2, 0x10, - 0x93, 0x53, 0x5f, 0xbd, 0xad, 0xf7, 0xae, 0x59, 0x8e, 0x78, 0x4c, 0x9c, 0x91, 0x0a, 0x8d, 0x47, - 0x9c, 0xf9, 0x17, 0x4a, 0x28, 0xe1, 0xb7, 0xc1, 0xd6, 0x0c, 0xbb, 0x01, 0x4e, 0x90, 0xf2, 0x51, - 0x35, 0x0c, 0x18, 0x85, 0xda, 0xd6, 0x71, 0xce, 0x83, 0x0a, 0x48, 0xf8, 0x23, 0xd0, 0xe0, 0xf3, - 0xf9, 0x61, 0x4d, 0x6e, 0xad, 0xb4, 0x43, 0x0e, 0xa9, 0x95, 0x1b, 0x1f, 0x92, 0xe7, 0x91, 0xcc, - 0x0e, 0x09, 0x8d, 0x98, 0xb8, 0x38, 0x77, 0x54, 0xa9, 0x3c, 0x98, 0x70, 0xe2, 0x3f, 0xb2, 0x5d, - 0x9b, 0x9d, 0x12, 0x4b, 0x8e, 0x6a, 0xf5, 0x78, 0xe2, 0x3a, 0x39, 0x19, 0x94, 0x41, 0x50, 0x55, - 0x2c, 0x1c, 0x80, 0xad, 0xb4, 0xa6, 0x8f, 0xa9, 0x45, 0x3a, 0x4d, 0xa9, 0x08, 0x6f, 0x88, 0x53, - 0x1e, 0xe6, 0x3c, 0x97, 0x0b, 0x16, 0x54, 0x88, 0xcd, 0x4e, 0x58, 0xe0, 0x8a, 0x09, 0xcb, 0x02, - 0x3b, 0x1e, 0xb5, 0x10, 0xf1, 0x1c, 0x6c, 0x92, 0x19, 0x71, 0xb9, 0x2a, 0xf6, 0x2d, 0xb9, 0xf4, - 0xbb, 0xa2, 0x92, 0x86, 0x25, 0xfe, 0xcb, 0x0a, 0x3b, 0x2a, 0x65, 0xeb, 0xfd, 0xa7, 0x0e, 0x9a, - 0xe9, 0xc8, 0xf2, 0x14, 0x00, 0x73, 0xde, 0x17, 0x98, 0x1a, 0x5b, 0x5e, 0xaf, 0xd2, 0x98, 0xa4, - 0x83, 0xa4, 0xed, 0x36, 0x31, 0x31, 0x94, 0x21, 0x82, 0x3f, 0x01, 0x4d, 0x39, 0xcc, 0x4a, 0x85, - 0x5f, 0x7e, 0x69, 0x85, 0xdf, 0x8c, 0x42, 0xad, 0x39, 0x9a, 0x13, 0xa0, 0x94, 0x0b, 0x4e, 0xb2, - 0x89, 0xf9, 0x82, 0xdd, 0x0a, 0xe6, 0x93, 0x28, 0x97, 0x28, 0xb0, 0x8a, 0x9e, 0xa1, 0x46, 0xb9, - 0x55, 0x59, 0x46, 0x55, 0x53, 0x5a, 0x1f, 0x34, 0xe5, 0xd8, 0x49, 0x2c, 0x62, 0xc9, 0x97, 0x50, - 0x37, 0x6e, 0x28, 0x68, 0x73, 0x34, 0x77, 0xa0, 0x14, 0x23, 0x88, 0xe3, 0x79, 0x52, 0x4d, 0xb5, - 0x09, 0x71, 0xfc, 0x8a, 0x91, 0xf2, 0x0a, 0xe5, 0xe5, 0xc4, 0x9f, 0xd9, 0x2e, 0x16, 0xff, 0x11, - 0x48, 0xc1, 0x53, 0xca, 0x7b, 0x92, 0x9a, 0x51, 0x16, 0x03, 0x1f, 0x82, 0xb6, 0x3a, 0x45, 0xaa, - 0x1d, 0xeb, 0xb2, 0x76, 0x3a, 0x6a, 0x91, 0xf6, 0x61, 0xc1, 0x8f, 0x16, 0x22, 0xe0, 0xfb, 0x60, - 0x73, 0x92, 0x93, 0x1f, 0x20, 0x29, 0x6e, 0x88, 0xf6, 0x9e, 0xd7, 0x9e, 0x3c, 0x0e, 0xfe, 0xa6, - 0x06, 0xee, 0x04, 0xae, 0x49, 0x03, 0x97, 0x13, 0x6b, 0xbe, 0x49, 0x62, 0x0d, 0xa9, 0xc5, 0xe4, - 0x5b, 0x6c, 0x1d, 0xbc, 0x53, 0x5a, 0x58, 0x4f, 0xcb, 0x63, 0xe2, 0x97, 0x5b, 0xe1, 0x44, 0x55, - 0x2b, 0x41, 0x0d, 0xd4, 0x7d, 0x82, 0xad, 0x73, 0xf9, 0x60, 0xeb, 0x46, 0x53, 0x74, 0x44, 0x24, - 0x0c, 0x28, 0xb6, 0xf7, 0xfe, 0x58, 0x03, 0xdb, 0x85, 0x7f, 0x50, 0xbe, 0xfc, 0x13, 0x68, 0x6f, - 0x0c, 0x16, 0x3a, 0x18, 0xfc, 0x08, 0xd4, 0xfd, 0xc0, 0x21, 0xf3, 0x67, 0xfb, 0xf6, 0xb5, 0xba, - 0x21, 0x0a, 0x1c, 0x92, 0xce, 0x0a, 0xe2, 0x8b, 0xa1, 0x98, 0xa6, 0xf7, 0xf7, 0x1a, 0x78, 0xab, - 0x08, 0x7f, 0xe2, 0x7e, 0xff, 0xb9, 0xcd, 0x0f, 0xa9, 0x45, 0x18, 0x22, 0xbf, 0x0c, 0x6c, 0x5f, - 0x4a, 0x89, 0x28, 0x12, 0x93, 0xba, 0x1c, 0x8b, 0x6b, 0xf9, 0x08, 0xcf, 0xe6, 0x03, 0xac, 0x2c, - 0x92, 0xc3, 0xac, 0x03, 0xe5, 0x71, 0x70, 0x04, 0x1a, 0xd4, 0x23, 0x3e, 0x16, 0x8d, 0x23, 0x1e, - 0x5e, 0xdf, 0x9f, 0xab, 0xfb, 0x13, 0x65, 0xbf, 
0x0c, 0xb5, 0xbb, 0x57, 0x6c, 0x63, 0x0e, 0x43, - 0x09, 0x11, 0xec, 0x81, 0xb5, 0x33, 0xec, 0x04, 0x44, 0xcc, 0x18, 0x2b, 0xfb, 0x75, 0x03, 0x88, - 0xf7, 0xf4, 0x63, 0x69, 0x41, 0xca, 0xd3, 0xfb, 0x4b, 0xe9, 0xe1, 0x86, 0xd4, 0x4a, 0x15, 0x6c, - 0x88, 0x39, 0x27, 0xbe, 0x0b, 0x3f, 0xcc, 0x0d, 0xe5, 0xef, 0x15, 0x86, 0xf2, 0xbb, 0x25, 0xa3, - 0x75, 0x96, 0xe6, 0xff, 0x35, 0xa7, 0xf7, 0x2e, 0x96, 0xc1, 0x4e, 0x59, 0x36, 0xe1, 0x07, 0xb1, - 0x56, 0x51, 0x57, 0xed, 0x78, 0x3f, 0xab, 0x55, 0xd4, 0xbd, 0x0c, 0xb5, 0xdb, 0xc5, 0xb8, 0xd8, - 0x83, 0x54, 0x1c, 0x74, 0x41, 0x8b, 0xa6, 0x37, 0xac, 0x8a, 0xf4, 0x3b, 0xd7, 0xaa, 0xa7, 0xf2, - 0x02, 0x89, 0x95, 0x2a, 0xeb, 0xcb, 0x2e, 0x00, 0x7f, 0x05, 0xb6, 0x69, 0xfe, 0xee, 0x65, 0xe6, - 0xae, 0xbf, 0x66, 0x59, 0xde, 0x8c, 0x3b, 0xea, 0xdc, 0xdb, 0x05, 0x3f, 0x2a, 0x2e, 0xd6, 0xfb, - 0x53, 0x0d, 0x54, 0x29, 0x0b, 0x1c, 0x66, 0x15, 0x5d, 0xbc, 0xac, 0xa6, 0x71, 0x90, 0x53, 0xf3, - 0xcb, 0x50, 0x7b, 0xbd, 0xea, 0x67, 0x43, 0x91, 0x76, 0xa6, 0x3f, 0x7d, 0xfc, 0x30, 0x2b, 0xf9, - 0x1f, 0x26, 0x92, 0xbf, 0x2c, 0xe9, 0xfa, 0xa9, 0xdc, 0x5f, 0x8f, 0x4b, 0x85, 0x1b, 0xdf, 0xba, - 0x78, 0xd1, 0x5d, 0xfa, 0xec, 0x45, 0x77, 0xe9, 0xf3, 0x17, 0xdd, 0xa5, 0x5f, 0x47, 0xdd, 0xda, - 0x45, 0xd4, 0xad, 0x7d, 0x16, 0x75, 0x6b, 0x9f, 0x47, 0xdd, 0xda, 0x3f, 0xa2, 0x6e, 0xed, 0x77, - 0xff, 0xec, 0x2e, 0x7d, 0x7c, 0xb3, 0xe4, 0x77, 0xdc, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x6d, - 0xdd, 0x3b, 0x38, 0xdd, 0x15, 0x00, 0x00, + // 1804 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x6f, 0xe4, 0x48, + 0x15, 0x4f, 0x27, 0xe9, 0xa4, 0xbb, 0x3a, 0x7f, 0x7a, 0x6a, 0x32, 0x33, 0x4d, 0x58, 0xb5, 0xb3, + 0x3d, 0xbb, 0xab, 0x2c, 0x2c, 0xee, 0x9d, 0xec, 0x88, 0xe5, 0x8f, 0x40, 0x3b, 0xce, 0x30, 0xcb, + 0x84, 0xce, 0x4e, 0x53, 0x9d, 0x01, 0x69, 0x59, 0x10, 0xd5, 0x76, 0x75, 0xc7, 0x3b, 0xb6, 0xcb, + 0xd8, 0xe5, 0x68, 0x72, 0x41, 0x48, 0x7c, 0x01, 0x3e, 0x05, 0x47, 0x2e, 0x70, 0x44, 0x70, 0x43, + 0x39, 0xae, 0x38, 0xad, 0x38, 0x58, 0x8c, 0xf9, 0x00, 0xdc, 0x83, 0x90, 0x50, 0x95, 0xcb, 0x7f, + 0xdb, 0x0e, 0x99, 0x95, 0x18, 0x71, 0x8b, 0xdf, 0xfb, 0xbd, 0xdf, 0x7b, 0x55, 0xef, 0xd5, 0x7b, + 0x2f, 0x0d, 0xee, 0x3e, 0xfb, 0x86, 0xaf, 0x9a, 0x74, 0x88, 0x5d, 0x73, 0x38, 0xc5, 0x4c, 0x3f, + 0x1d, 0x9e, 0xdd, 0x1b, 0xce, 0x89, 0x43, 0x3c, 0xcc, 0x88, 0xa1, 0xba, 0x1e, 0x65, 0x14, 0xde, + 0x8c, 0x41, 0x2a, 0x76, 0x4d, 0x55, 0x80, 0xd4, 0xb3, 0x7b, 0xbb, 0x5f, 0x9b, 0x9b, 0xec, 0x34, + 0x98, 0xaa, 0x3a, 0xb5, 0x87, 0x73, 0x3a, 0xa7, 0x43, 0x81, 0x9d, 0x06, 0x33, 0xf1, 0x25, 0x3e, + 0xc4, 0x5f, 0x31, 0xc7, 0xee, 0x20, 0xe7, 0x48, 0xa7, 0x1e, 0xa9, 0xf0, 0xb3, 0x7b, 0x3f, 0xc3, + 0xd8, 0x58, 0x3f, 0x35, 0x1d, 0xe2, 0x9d, 0x0f, 0xdd, 0x67, 0x73, 0x2e, 0xf0, 0x87, 0x36, 0x61, + 0xb8, 0xca, 0x6a, 0x58, 0x67, 0xe5, 0x05, 0x0e, 0x33, 0x6d, 0xb2, 0x60, 0xf0, 0xf5, 0xff, 0x66, + 0xe0, 0xeb, 0xa7, 0xc4, 0xc6, 0x65, 0xbb, 0xc1, 0xbf, 0x1a, 0x60, 0xfd, 0xd0, 0xa3, 0xce, 0x11, + 0x9d, 0xc2, 0x9f, 0x83, 0x16, 0x8f, 0xc7, 0xc0, 0x0c, 0xf7, 0x1a, 0x7b, 0x8d, 0xfd, 0xce, 0xc1, + 0xbb, 0x6a, 0x76, 0x4b, 0x29, 0xad, 0xea, 0x3e, 0x9b, 0x73, 0x81, 0xaf, 0x72, 0xb4, 0x7a, 0x76, + 0x4f, 0x7d, 0x32, 0xfd, 0x94, 0xe8, 0xec, 0x98, 0x30, 0xac, 0xc1, 0x8b, 0x50, 0x59, 0x8a, 0x42, + 0x05, 0x64, 0x32, 0x94, 0xb2, 0x42, 0x0d, 0xac, 0xfa, 0x2e, 0xd1, 0x7b, 0xcb, 0x82, 0x7d, 0x4f, + 0xad, 0xc8, 0x81, 0x2a, 0xa3, 0x99, 0xb8, 0x44, 0xd7, 0x36, 0x24, 0xdb, 0x2a, 0xff, 0x42, 0xc2, + 0x16, 0x1e, 0x81, 0x35, 0x9f, 0x61, 0x16, 0xf8, 0xbd, 0x15, 0xc1, 0x32, 0xb8, 
0x92, 0x45, 0x20, + 0xb5, 0x2d, 0xc9, 0xb3, 0x16, 0x7f, 0x23, 0xc9, 0x30, 0xf8, 0x5d, 0x03, 0x74, 0x24, 0x72, 0x64, + 0xfa, 0x0c, 0x7e, 0xb2, 0x70, 0x03, 0xea, 0xf5, 0x6e, 0x80, 0x5b, 0x8b, 0xf3, 0x77, 0xa5, 0xa7, + 0x56, 0x22, 0xc9, 0x9d, 0xfe, 0x01, 0x68, 0x9a, 0x8c, 0xd8, 0x7e, 0x6f, 0x79, 0x6f, 0x65, 0xbf, + 0x73, 0xf0, 0xda, 0x55, 0x81, 0x6b, 0x9b, 0x92, 0xa8, 0xf9, 0x98, 0x9b, 0xa0, 0xd8, 0x72, 0xf0, + 0xd7, 0xd5, 0x34, 0x60, 0x7e, 0x25, 0xf0, 0x1d, 0xd0, 0xe2, 0x89, 0x35, 0x02, 0x8b, 0x88, 0x80, + 0xdb, 0x59, 0x00, 0x13, 0x29, 0x47, 0x29, 0x02, 0xee, 0x83, 0x16, 0xaf, 0x85, 0x8f, 0xa9, 0x43, + 0x7a, 0x2d, 0x81, 0xde, 0xe0, 0xc8, 0x13, 0x29, 0x43, 0xa9, 0x16, 0x3e, 0x05, 0x77, 0x7c, 0x86, + 0x3d, 0x66, 0x3a, 0xf3, 0x87, 0x04, 0x1b, 0x96, 0xe9, 0x90, 0x09, 0xd1, 0xa9, 0x63, 0xf8, 0x22, + 0x77, 0x2b, 0xda, 0x97, 0xa3, 0x50, 0xb9, 0x33, 0xa9, 0x86, 0xa0, 0x3a, 0x5b, 0xf8, 0x09, 0xb8, + 0xa1, 0x53, 0x47, 0x0f, 0x3c, 0x8f, 0x38, 0xfa, 0xf9, 0x98, 0x5a, 0xa6, 0x7e, 0x2e, 0xd2, 0xd8, + 0xd6, 0x54, 0x19, 0xf7, 0x8d, 0xc3, 0x32, 0xe0, 0xb2, 0x4a, 0x88, 0x16, 0x89, 0xe0, 0x9b, 0x60, + 0xdd, 0x0f, 0x7c, 0x97, 0x38, 0x46, 0x6f, 0x75, 0xaf, 0xb1, 0xdf, 0xd2, 0x3a, 0x51, 0xa8, 0xac, + 0x4f, 0x62, 0x11, 0x4a, 0x74, 0xf0, 0x27, 0xa0, 0xf3, 0x29, 0x9d, 0x9e, 0x10, 0xdb, 0xb5, 0x30, + 0x23, 0xbd, 0xa6, 0xc8, 0xf3, 0x1b, 0x95, 0xc9, 0x38, 0xca, 0x70, 0xa2, 0x1e, 0x6f, 0xca, 0x20, + 0x3b, 0x39, 0x05, 0xca, 0xb3, 0xc1, 0x9f, 0x81, 0x5d, 0x3f, 0xd0, 0x75, 0xe2, 0xfb, 0xb3, 0xc0, + 0x3a, 0xa2, 0x53, 0xff, 0xfb, 0xa6, 0xcf, 0xa8, 0x77, 0x3e, 0x32, 0x6d, 0x93, 0xf5, 0xd6, 0xf6, + 0x1a, 0xfb, 0x4d, 0xad, 0x1f, 0x85, 0xca, 0xee, 0xa4, 0x16, 0x85, 0xae, 0x60, 0x80, 0x08, 0xdc, + 0x9e, 0x61, 0xd3, 0x22, 0xc6, 0x02, 0xf7, 0xba, 0xe0, 0xde, 0x8d, 0x42, 0xe5, 0xf6, 0xa3, 0x4a, + 0x04, 0xaa, 0xb1, 0x1c, 0xfc, 0x69, 0x19, 0x6c, 0x16, 0xde, 0x0b, 0xfc, 0x01, 0x58, 0xc3, 0x3a, + 0x33, 0xcf, 0x78, 0x51, 0xf1, 0x52, 0xbd, 0x9b, 0xbf, 0x1d, 0xde, 0xe9, 0xb2, 0x57, 0x8f, 0xc8, + 0x8c, 0xf0, 0x24, 0x90, 0xec, 0x91, 0x3d, 0x10, 0xa6, 0x48, 0x52, 0x40, 0x0b, 0x74, 0x2d, 0xec, + 0xb3, 0xa4, 0x1e, 0x79, 0xb5, 0x89, 0xfc, 0x74, 0x0e, 0xbe, 0x72, 0xbd, 0xc7, 0xc5, 0x2d, 0xb4, + 0x9d, 0x28, 0x54, 0xba, 0xa3, 0x12, 0x0f, 0x5a, 0x60, 0x86, 0x1e, 0x80, 0x42, 0x96, 0x5e, 0xa1, + 0xf0, 0xd7, 0x7c, 0x69, 0x7f, 0xb7, 0xa3, 0x50, 0x81, 0xa3, 0x05, 0x26, 0x54, 0xc1, 0x3e, 0xf8, + 0x67, 0x03, 0xac, 0xbc, 0x9a, 0x06, 0xfa, 0xdd, 0x42, 0x03, 0x7d, 0xad, 0xae, 0x68, 0x6b, 0x9b, + 0xe7, 0xa3, 0x52, 0xf3, 0xec, 0xd7, 0x32, 0x5c, 0xdd, 0x38, 0xff, 0xb2, 0x02, 0x36, 0x8e, 0xe8, + 0xf4, 0x90, 0x3a, 0x86, 0xc9, 0x4c, 0xea, 0xc0, 0xfb, 0x60, 0x95, 0x9d, 0xbb, 0x49, 0x13, 0xda, + 0x4b, 0x5c, 0x9f, 0x9c, 0xbb, 0xe4, 0x32, 0x54, 0xba, 0x79, 0x2c, 0x97, 0x21, 0x81, 0x86, 0xa3, + 0x34, 0x9c, 0x65, 0x61, 0x77, 0xbf, 0xe8, 0xee, 0x32, 0x54, 0x2a, 0x46, 0xac, 0x9a, 0x32, 0x15, + 0x83, 0x82, 0x73, 0xb0, 0xc9, 0x93, 0x33, 0xf6, 0xe8, 0x34, 0xae, 0xb2, 0x95, 0x97, 0xce, 0xfa, + 0x2d, 0x19, 0xc0, 0xe6, 0x28, 0x4f, 0x84, 0x8a, 0xbc, 0xf0, 0x2c, 0xae, 0xb1, 0x13, 0x0f, 0x3b, + 0x7e, 0x7c, 0xa4, 0x2f, 0x56, 0xd3, 0xbb, 0xd2, 0x9b, 0xa8, 0xb3, 0x22, 0x1b, 0xaa, 0xf0, 0x00, + 0xdf, 0x02, 0x6b, 0x1e, 0xc1, 0x3e, 0x75, 0x44, 0x3d, 0xb7, 0xb3, 0xec, 0x20, 0x21, 0x45, 0x52, + 0x0b, 0xdf, 0x06, 0xeb, 0x36, 0xf1, 0x7d, 0x3c, 0x27, 0xa2, 0xe3, 0xb4, 0xb5, 0x6d, 0x09, 0x5c, + 0x3f, 0x8e, 0xc5, 0x28, 0xd1, 0x0f, 0x7e, 0xdb, 0x00, 0xeb, 0xaf, 0x66, 0xfa, 0x7d, 0xa7, 0x38, + 0xfd, 0x7a, 0x75, 0x95, 0x57, 0x33, 0xf9, 0xfe, 0xd8, 0x12, 0x81, 0x8a, 0xa9, 0x77, 0x0f, 0x74, + 0x5c, 
0xec, 0x61, 0xcb, 0x22, 0x96, 0xe9, 0xdb, 0x22, 0xd6, 0xa6, 0xb6, 0xcd, 0xfb, 0xf2, 0x38, + 0x13, 0xa3, 0x3c, 0x86, 0x9b, 0xe8, 0xd4, 0x76, 0x2d, 0xc2, 0x2f, 0x33, 0x2e, 0x37, 0x69, 0x72, + 0x98, 0x89, 0x51, 0x1e, 0x03, 0x9f, 0x80, 0x5b, 0x71, 0x07, 0x2b, 0x4f, 0xc0, 0x15, 0x31, 0x01, + 0xbf, 0x14, 0x85, 0xca, 0xad, 0x07, 0x55, 0x00, 0x54, 0x6d, 0x07, 0xe7, 0xa0, 0xeb, 0x52, 0x83, + 0x37, 0xe7, 0xc0, 0x23, 0x72, 0xf8, 0x75, 0xc4, 0x3d, 0xbf, 0x59, 0x79, 0x19, 0xe3, 0x12, 0x38, + 0xee, 0x81, 0x65, 0x29, 0x5a, 0x20, 0x85, 0xf7, 0xc1, 0xc6, 0x14, 0xeb, 0xcf, 0xe8, 0x6c, 0x96, + 0x1f, 0x0d, 0xdd, 0x28, 0x54, 0x36, 0xb4, 0x9c, 0x1c, 0x15, 0x50, 0x70, 0x04, 0x76, 0xf2, 0xdf, + 0x63, 0xe2, 0x3d, 0x76, 0x0c, 0xf2, 0xbc, 0xb7, 0x21, 0xac, 0x7b, 0x51, 0xa8, 0xec, 0x68, 0x15, + 0x7a, 0x54, 0x69, 0x05, 0x3f, 0x00, 0x5d, 0x1b, 0x3f, 0x8f, 0x27, 0x91, 0x90, 0x10, 0xbf, 0xb7, + 0x29, 0x98, 0xc4, 0x29, 0x8e, 0x4b, 0x3a, 0xb4, 0x80, 0x86, 0x3f, 0x05, 0x2d, 0x9f, 0x58, 0x44, + 0x67, 0xd4, 0x93, 0x6f, 0xeb, 0xbd, 0x6b, 0x96, 0x23, 0x9e, 0x12, 0x6b, 0x22, 0x4d, 0xe3, 0x15, + 0x27, 0xf9, 0x42, 0x29, 0x25, 0xfc, 0x16, 0xd8, 0xb2, 0xb1, 0x13, 0xe0, 0x14, 0x29, 0x1e, 0x55, + 0x4b, 0x83, 0x51, 0xa8, 0x6c, 0x1d, 0x17, 0x34, 0xa8, 0x84, 0x84, 0x3f, 0x04, 0x2d, 0x96, 0xec, + 0x0f, 0x6b, 0x22, 0xb4, 0xca, 0x09, 0x39, 0xa6, 0x46, 0x61, 0x7d, 0x48, 0x9f, 0x47, 0xba, 0x3b, + 0xa4, 0x34, 0x7c, 0xe3, 0x62, 0xcc, 0x92, 0xa5, 0xf2, 0x60, 0xc6, 0x88, 0xf7, 0xc8, 0x74, 0x4c, + 0xff, 0x94, 0x18, 0x62, 0x55, 0x6b, 0xc6, 0x1b, 0xd7, 0xc9, 0xc9, 0xa8, 0x0a, 0x82, 0xea, 0x6c, + 0xe1, 0x08, 0x6c, 0x65, 0x35, 0x7d, 0x4c, 0x0d, 0xd2, 0x6b, 0x8b, 0x8e, 0xf0, 0x06, 0x3f, 0xe5, + 0x61, 0x41, 0x73, 0xb9, 0x20, 0x41, 0x25, 0xdb, 0xfc, 0x86, 0x05, 0xae, 0xd8, 0xb0, 0x0c, 0xb0, + 0xe3, 0x52, 0x03, 0x11, 0xd7, 0xc2, 0x3a, 0xb1, 0x89, 0xc3, 0x64, 0xb1, 0x6f, 0x09, 0xd7, 0xef, + 0xf2, 0x4a, 0x1a, 0x57, 0xe8, 0x2f, 0x6b, 0xe4, 0xa8, 0x92, 0x0d, 0x7e, 0x15, 0xb4, 0x6d, 0xec, + 0xe0, 0x39, 0x31, 0xb4, 0xf3, 0xde, 0xb6, 0xa0, 0xde, 0x8c, 0x42, 0xa5, 0x7d, 0x9c, 0x08, 0x51, + 0xa6, 0x1f, 0xfc, 0xbb, 0x09, 0xda, 0xd9, 0x7e, 0xf3, 0x14, 0x00, 0x3d, 0x19, 0x22, 0xbe, 0xdc, + 0x71, 0x5e, 0xaf, 0x6b, 0x48, 0xe9, 0xb8, 0xc9, 0x66, 0x73, 0x2a, 0xf2, 0x51, 0x8e, 0x08, 0xfe, + 0x18, 0xb4, 0xc5, 0xe6, 0x2b, 0xc6, 0xc1, 0xf2, 0x4b, 0x8f, 0x03, 0x11, 0xfd, 0x24, 0x21, 0x40, + 0x19, 0x17, 0x9c, 0xe5, 0xb3, 0xf8, 0x05, 0x47, 0x1b, 0x2c, 0x66, 0x5c, 0xb8, 0x28, 0xb1, 0xf2, + 0x01, 0x23, 0xf7, 0xbe, 0x55, 0x51, 0x73, 0x75, 0x2b, 0xdd, 0x10, 0xb4, 0xc5, 0x8e, 0x4a, 0x0c, + 0x62, 0x88, 0x67, 0xd3, 0xd4, 0x6e, 0x48, 0x68, 0x7b, 0x92, 0x28, 0x50, 0x86, 0xe1, 0xc4, 0xf1, + 0xf2, 0x29, 0x57, 0xe0, 0x94, 0x38, 0x7e, 0xf2, 0x48, 0x6a, 0x79, 0x9b, 0x66, 0xc4, 0xb3, 0x4d, + 0x07, 0xf3, 0x7f, 0x1f, 0x44, 0x77, 0x94, 0x6d, 0xfa, 0x24, 0x13, 0xa3, 0x3c, 0x06, 0x3e, 0x04, + 0x5d, 0x79, 0x8a, 0xac, 0xd1, 0xac, 0x8b, 0x6a, 0xe8, 0x49, 0x27, 0xdd, 0xc3, 0x92, 0x1e, 0x2d, + 0x58, 0xc0, 0xf7, 0xc1, 0xe6, 0xac, 0xd0, 0xab, 0x80, 0xa0, 0xb8, 0xc1, 0x77, 0x81, 0x62, 0xa3, + 0x2a, 0xe2, 0xe0, 0xaf, 0x1b, 0xe0, 0x4e, 0xe0, 0xe8, 0x34, 0x70, 0x18, 0x31, 0x92, 0x20, 0x89, + 0x31, 0xa6, 0x86, 0x2f, 0x1e, 0x6e, 0xe7, 0xe0, 0x9d, 0xca, 0xc2, 0x7a, 0x5a, 0x6d, 0x13, 0x3f, + 0xf3, 0x1a, 0x25, 0xaa, 0xf3, 0x04, 0x15, 0xd0, 0xf4, 0x08, 0x36, 0xce, 0xc5, 0xeb, 0x6e, 0x6a, + 0x6d, 0x3e, 0x3e, 0x11, 0x17, 0xa0, 0x58, 0x3e, 0xf8, 0x7d, 0x03, 0x6c, 0x97, 0xfe, 0x9b, 0xf9, + 0xff, 0x5f, 0x57, 0x07, 0x53, 0xb0, 0x30, 0xee, 0xe0, 0x47, 0xa0, 0xe9, 0x05, 0x16, 0x49, 0x9e, + 0xed, 0xdb, 0xd7, 0x1a, 0x9d, 
0x28, 0xb0, 0x48, 0xb6, 0x58, 0xf0, 0x2f, 0x1f, 0xc5, 0x34, 0x83, + 0xbf, 0x35, 0xc0, 0x5b, 0x65, 0xf8, 0x13, 0xe7, 0x7b, 0xcf, 0x4d, 0x76, 0x48, 0x0d, 0xe2, 0x23, + 0xf2, 0x8b, 0xc0, 0xf4, 0x44, 0xdf, 0xe1, 0x45, 0xa2, 0x53, 0x87, 0x61, 0x7e, 0x2d, 0x1f, 0x61, + 0x3b, 0xd9, 0x76, 0x45, 0x91, 0x1c, 0xe6, 0x15, 0xa8, 0x88, 0x83, 0x13, 0xd0, 0xa2, 0x2e, 0xf1, + 0x30, 0x9f, 0x32, 0xf1, 0xa6, 0xfb, 0x7e, 0x32, 0x0a, 0x9e, 0x48, 0xf9, 0x65, 0xa8, 0xdc, 0xbd, + 0x22, 0x8c, 0x04, 0x86, 0x52, 0x22, 0x38, 0x00, 0x6b, 0x67, 0xd8, 0x0a, 0x08, 0x5f, 0x48, 0x56, + 0xf6, 0x9b, 0x1a, 0xe0, 0xef, 0xe9, 0x47, 0x42, 0x82, 0xa4, 0x66, 0xf0, 0xe7, 0xca, 0xc3, 0x8d, + 0xa9, 0x91, 0x75, 0xb0, 0x31, 0x66, 0x8c, 0x78, 0x0e, 0xfc, 0xb0, 0xb0, 0xc1, 0xbf, 0x57, 0xda, + 0xe0, 0xef, 0x56, 0xec, 0xe1, 0x79, 0x9a, 0xff, 0xd5, 0x52, 0x3f, 0xb8, 0x58, 0x06, 0x3b, 0x55, + 0xd9, 0x84, 0x1f, 0xc4, 0xbd, 0x8a, 0x3a, 0x32, 0xe2, 0xfd, 0x7c, 0xaf, 0xa2, 0xce, 0x65, 0xa8, + 0xdc, 0x2e, 0xdb, 0xc5, 0x1a, 0x24, 0xed, 0xa0, 0x03, 0x3a, 0x34, 0xbb, 0x61, 0x59, 0xa4, 0xdf, + 0xbe, 0x56, 0x3d, 0x55, 0x17, 0x48, 0xdc, 0xa9, 0xf2, 0xba, 0xbc, 0x03, 0xf8, 0x4b, 0xb0, 0x4d, + 0x8b, 0x77, 0x2f, 0x32, 0x77, 0x7d, 0x9f, 0x55, 0x79, 0xd3, 0xee, 0xc8, 0x73, 0x6f, 0x97, 0xf4, + 0xa8, 0xec, 0x6c, 0xf0, 0x87, 0x06, 0xa8, 0xeb, 0x2c, 0x70, 0x9c, 0xef, 0xe8, 0xfc, 0x65, 0xb5, + 0xb5, 0x83, 0x42, 0x37, 0xbf, 0x0c, 0x95, 0xd7, 0xeb, 0x7e, 0x63, 0xe4, 0x69, 0xf7, 0xd5, 0xa7, + 0x8f, 0x1f, 0xe6, 0x5b, 0xfe, 0x87, 0x69, 0xcb, 0x5f, 0x16, 0x74, 0xc3, 0xac, 0xdd, 0x5f, 0x8f, + 0x4b, 0x9a, 0x6b, 0xdf, 0xbc, 0x78, 0xd1, 0x5f, 0xfa, 0xec, 0x45, 0x7f, 0xe9, 0xf3, 0x17, 0xfd, + 0xa5, 0x5f, 0x45, 0xfd, 0xc6, 0x45, 0xd4, 0x6f, 0x7c, 0x16, 0xf5, 0x1b, 0x9f, 0x47, 0xfd, 0xc6, + 0xdf, 0xa3, 0x7e, 0xe3, 0x37, 0xff, 0xe8, 0x2f, 0x7d, 0x7c, 0xb3, 0xe2, 0x47, 0xdf, 0xff, 0x04, + 0x00, 0x00, 0xff, 0xff, 0xe0, 0x48, 0x1b, 0x03, 0x0a, 0x16, 0x00, 0x00, } func (m *CronJob) Marshal() (dAtA []byte, err error) { @@ -1029,6 +1030,13 @@ func (m *JobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ManagedBy != nil { + i -= len(*m.ManagedBy) + copy(dAtA[i:], *m.ManagedBy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ManagedBy))) + i-- + dAtA[i] = 0x7a + } if m.PodReplacementPolicy != nil { i -= len(*m.PodReplacementPolicy) copy(dAtA[i:], *m.PodReplacementPolicy) @@ -1690,6 +1698,10 @@ func (m *JobSpec) Size() (n int) { l = len(*m.PodReplacementPolicy) n += 1 + l + sovGenerated(uint64(l)) } + if m.ManagedBy != nil { + l = len(*m.ManagedBy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1968,6 +1980,7 @@ func (this *JobSpec) String() string { `BackoffLimitPerIndex:` + valueToStringGenerated(this.BackoffLimitPerIndex) + `,`, `MaxFailedIndexes:` + valueToStringGenerated(this.MaxFailedIndexes) + `,`, `PodReplacementPolicy:` + valueToStringGenerated(this.PodReplacementPolicy) + `,`, + `ManagedBy:` + valueToStringGenerated(this.ManagedBy) + `,`, `}`, }, "") return s @@ -3657,6 +3670,39 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { s := PodReplacementPolicy(dAtA[iNdEx:postIndex]) m.PodReplacementPolicy = &s iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagedBy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ManagedBy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/staging/src/k8s.io/api/batch/v1/generated.proto b/staging/src/k8s.io/api/batch/v1/generated.proto index f050072b7c15..1a9facaadac8 100644 --- a/staging/src/k8s.io/api/batch/v1/generated.proto +++ b/staging/src/k8s.io/api/batch/v1/generated.proto @@ -330,6 +330,20 @@ message JobSpec { // This is on by default. // +optional optional string podReplacementPolicy = 14; + + // ManagedBy field indicates the controller that manages a Job. The k8s Job + // controller reconciles jobs which don't have this field at all or the field + // value is the reserved string `kubernetes.io/job-controller`, but skips + // reconciling Jobs with a custom value for this field. + // The value must be a valid domain-prefixed path (e.g. acme.io/foo) - + // all characters before the first "/" must be a valid subdomain as defined + // by RFC 1123. All characters trailing the first "/" must be valid HTTP Path + // characters as defined by RFC 3986. The value cannot exceed 64 characters. + // + // This field is alpha-level. The job controller accepts setting the field + // when the feature gate JobManagedBy is enabled (disabled by default). + // +optional + optional string managedBy = 15; } // JobStatus represents the current state of a Job. @@ -340,6 +354,13 @@ message JobStatus { // status true; when the Job is resumed, the status of this condition will // become false. When a Job is completed, one of the conditions will have // type "Complete" and status true. + // + // A job is considered finished when it is in a terminal condition, either + // "Complete" or "Failed". At that point, all pods of the job are in terminal + // phase. Job cannot be both in the "Complete" and "Failed" conditions. + // Additionally, it cannot be in the "Complete" and "FailureTarget" conditions. + // The "Complete", "Failed" and "FailureTarget" conditions cannot be disabled. + // // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional // +patchMergeKey=type @@ -351,31 +372,42 @@ message JobStatus { // Job is created in the suspended state, this field is not set until the // first time it is resumed. This field is reset every time a Job is resumed // from suspension. It is represented in RFC3339 form and is in UTC. + // + // Once set, the field can only be removed when the job is suspended. + // The field cannot be modified while the job is unsuspended or finished. + // // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; // Represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. - // The completion time is only set when the job finishes successfully. + // The completion time is set when the job finishes successfully, and only then. + // The value cannot be updated or removed. The value indicates the same or + // later point in time as the startTime field. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; // The number of pending and running pods which are not terminating (without // a deletionTimestamp). 
+ // The value is zero for finished jobs. // +optional optional int32 active = 4; // The number of pods which reached phase Succeeded. + // The value increases monotonically for a given spec. However, it may + // decrease in reaction to scale down of elastic indexed jobs. // +optional optional int32 succeeded = 5; // The number of pods which reached phase Failed. + // The value increases monotonically. // +optional optional int32 failed = 6; // The number of pods which are terminating (in phase Pending or Running // and have a deletionTimestamp). + // The value is zero (or null) for finished jobs. // // This field is beta-level. The job controller populates the field when // the feature gate JobPodReplacementPolicy is enabled (enabled by default). @@ -392,7 +424,7 @@ message JobStatus { // +optional optional string completedIndexes = 7; - // FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. + // FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. // The indexes are represented in the text format analogous as for the // `completedIndexes` field, ie. they are kept as decimal integers // separated by commas. The numbers are listed in increasing order. Three or @@ -400,6 +432,8 @@ message JobStatus { // last element of the series, separated by a hyphen. // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are // represented as "1,3-5,7". + // The set of failed indexes cannot overlap with the set of completed indexes. + // // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` // feature gate is enabled (enabled by default). // +optional @@ -419,10 +453,12 @@ message JobStatus { // // Old jobs might not be tracked using this field, in which case the field // remains null. + // The structure is empty for finished jobs. // +optional optional UncountedTerminatedPods uncountedTerminatedPods = 8; // The number of pods which have a Ready condition. + // The value is zero (or null) for finished jobs. // +optional optional int32 ready = 9; } diff --git a/staging/src/k8s.io/api/batch/v1/types.go b/staging/src/k8s.io/api/batch/v1/types.go index a48190ae1e93..1209331394c8 100644 --- a/staging/src/k8s.io/api/batch/v1/types.go +++ b/staging/src/k8s.io/api/batch/v1/types.go @@ -57,6 +57,9 @@ const ( // to the pod, which don't count towards the backoff limit, according to the // pod failure policy. When the annotation is absent zero is implied. JobIndexIgnoredFailureCountAnnotation = labelPrefix + "job-index-ignored-failure-count" + // JobControllerName reserved value for the managedBy field for the built-in + // Job controller. + JobControllerName = "kubernetes.io/job-controller" ) // +genclient @@ -410,6 +413,20 @@ type JobSpec struct { // This is on by default. // +optional PodReplacementPolicy *PodReplacementPolicy `json:"podReplacementPolicy,omitempty" protobuf:"bytes,14,opt,name=podReplacementPolicy,casttype=podReplacementPolicy"` + + // ManagedBy field indicates the controller that manages a Job. The k8s Job + // controller reconciles jobs which don't have this field at all or the field + // value is the reserved string `kubernetes.io/job-controller`, but skips + // reconciling Jobs with a custom value for this field. + // The value must be a valid domain-prefixed path (e.g. acme.io/foo) - + // all characters before the first "/" must be a valid subdomain as defined + // by RFC 1123. All characters trailing the first "/" must be valid HTTP Path + // characters as defined by RFC 3986. 
The value cannot exceed 64 characters. + // + // This field is alpha-level. The job controller accepts setting the field + // when the feature gate JobManagedBy is enabled (disabled by default). + // +optional + ManagedBy *string `json:"managedBy,omitempty" protobuf:"bytes,15,opt,name=managedBy"` } // JobStatus represents the current state of a Job. @@ -420,6 +437,13 @@ type JobStatus struct { // status true; when the Job is resumed, the status of this condition will // become false. When a Job is completed, one of the conditions will have // type "Complete" and status true. + // + // A job is considered finished when it is in a terminal condition, either + // "Complete" or "Failed". At that point, all pods of the job are in terminal + // phase. Job cannot be both in the "Complete" and "Failed" conditions. + // Additionally, it cannot be in the "Complete" and "FailureTarget" conditions. + // The "Complete", "Failed" and "FailureTarget" conditions cannot be disabled. + // // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional // +patchMergeKey=type @@ -431,31 +455,42 @@ type JobStatus struct { // Job is created in the suspended state, this field is not set until the // first time it is resumed. This field is reset every time a Job is resumed // from suspension. It is represented in RFC3339 form and is in UTC. + // + // Once set, the field can only be removed when the job is suspended. + // The field cannot be modified while the job is unsuspended or finished. + // // +optional StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` // Represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. - // The completion time is only set when the job finishes successfully. + // The completion time is set when the job finishes successfully, and only then. + // The value cannot be updated or removed. The value indicates the same or + // later point in time as the startTime field. // +optional CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` // The number of pending and running pods which are not terminating (without // a deletionTimestamp). + // The value is zero for finished jobs. // +optional Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` // The number of pods which reached phase Succeeded. + // The value increases monotonically for a given spec. However, it may + // decrease in reaction to scale down of elastic indexed jobs. // +optional Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` // The number of pods which reached phase Failed. + // The value increases monotonically. // +optional Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` // The number of pods which are terminating (in phase Pending or Running // and have a deletionTimestamp). + // The value is zero (or null) for finished jobs. // // This field is beta-level. The job controller populates the field when // the feature gate JobPodReplacementPolicy is enabled (enabled by default). @@ -472,7 +507,7 @@ type JobStatus struct { // +optional CompletedIndexes string `json:"completedIndexes,omitempty" protobuf:"bytes,7,opt,name=completedIndexes"` - // FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. 
+ // FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. // The indexes are represented in the text format analogous as for the // `completedIndexes` field, ie. they are kept as decimal integers // separated by commas. The numbers are listed in increasing order. Three or @@ -480,6 +515,8 @@ type JobStatus struct { // last element of the series, separated by a hyphen. // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are // represented as "1,3-5,7". + // The set of failed indexes cannot overlap with the set of completed indexes. + // // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` // feature gate is enabled (enabled by default). // +optional @@ -499,10 +536,12 @@ type JobStatus struct { // // Old jobs might not be tracked using this field, in which case the field // remains null. + // The structure is empty for finished jobs. // +optional UncountedTerminatedPods *UncountedTerminatedPods `json:"uncountedTerminatedPods,omitempty" protobuf:"bytes,8,opt,name=uncountedTerminatedPods"` // The number of pods which have a Ready condition. + // The value is zero (or null) for finished jobs. // +optional Ready *int32 `json:"ready,omitempty" protobuf:"varint,9,opt,name=ready"` } diff --git a/staging/src/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 911c436f8d47..c2f9fe0f8ce1 100644 --- a/staging/src/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -126,6 +126,7 @@ var map_JobSpec = map[string]string{ "completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", "suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. 
Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is a beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.", + "managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 64 characters.\n\nThis field is alpha-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (disabled by default).", } func (JobSpec) SwaggerDoc() map[string]string { @@ -134,17 +135,17 @@ func (JobSpec) SwaggerDoc() map[string]string { var map_JobStatus = map[string]string{ "": "JobStatus represents the current state of a Job.", - "conditions": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", - "startTime": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.", - "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully.", - "active": "The number of pending and running pods which are not terminating (without a deletionTimestamp).", - "succeeded": "The number of pods which reached phase Succeeded.", - "failed": "The number of pods which reached phase Failed.", - "terminating": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).", + "conditions": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false.
When a Job is completed, one of the conditions will have type \"Complete\" and status true.\n\nA job is considered finished when it is in a terminal condition, either \"Complete\" or \"Failed\". At that point, all pods of the job are in terminal phase. Job cannot be both in the \"Complete\" and \"Failed\" conditions. Additionally, it cannot be in the \"Complete\" and \"FailureTarget\" conditions. The \"Complete\", \"Failed\" and \"FailureTarget\" conditions cannot be disabled.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "startTime": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.\n\nOnce set, the field can only be removed when the job is suspended. The field cannot be modified while the job is unsuspended or finished.", + "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is set when the job finishes successfully, and only then. The value cannot be updated or removed. The value indicates the same or later point in time as the startTime field.", + "active": "The number of pending and running pods which are not terminating (without a deletionTimestamp). The value is zero for finished jobs.", + "succeeded": "The number of pods which reached phase Succeeded. The value increases monotonically for a given spec. However, it may decrease in reaction to scale down of elastic indexed jobs.", + "failed": "The number of pods which reached phase Failed. The value increases monotonically.", + "terminating": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp). The value is zero (or null) for finished jobs.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).", "completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", - "failedIndexes": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", - "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. 
When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", - "ready": "The number of pods which have a Ready condition.", + "failedIndexes": "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.\n\nThis field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null. The structure is empty for finished jobs.", + "ready": "The number of pods which have a Ready condition. 
The value is zero (or null) for finished jobs.", } func (JobStatus) SwaggerDoc() map[string]string { diff --git a/staging/src/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/batch/v1/zz_generated.deepcopy.go index 43fc41515be1..bf3682968e16 100644 --- a/staging/src/k8s.io/api/batch/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -308,6 +308,11 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) { *out = new(PodReplacementPolicy) **out = **in } + if in.ManagedBy != nil { + in, out := &in.ManagedBy, &out.ManagedBy + *out = new(string) + **out = **in + } return } diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.json b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.json index f50501671e64..3a303983ac8c 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.json +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.json @@ -1822,7 +1822,8 @@ "ttlSecondsAfterFinished": 8, "completionMode": "completionModeValue", "suspend": true, - "podReplacementPolicy": "podReplacementPolicyValue" + "podReplacementPolicy": "podReplacementPolicyValue", + "managedBy": "managedByValue" } }, "successfulJobsHistoryLimit": 6, diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.pb b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.pb index 4cf754ea21e3..4e407827b451 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.pb and b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.yaml b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.yaml index 6287dfc4412f..93f35eb164b9 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.yaml @@ -74,6 +74,7 @@ spec: backoffLimitPerIndex: 12 completionMode: completionModeValue completions: 2 + managedBy: managedByValue manualSelector: true maxFailedIndexes: 13 parallelism: 1 diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.json b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.json index ecc93c80c5bd..de9f8782db95 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.json +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.json @@ -1773,7 +1773,8 @@ "ttlSecondsAfterFinished": 8, "completionMode": "completionModeValue", "suspend": true, - "podReplacementPolicy": "podReplacementPolicyValue" + "podReplacementPolicy": "podReplacementPolicyValue", + "managedBy": "managedByValue" }, "status": { "conditions": [ diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.pb b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.pb index 80a5ce4877ae..c2c737774a6b 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.pb and b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.yaml b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.yaml index 7f5b79c7cf9d..4b95df6f0127 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.yaml @@ -38,6 +38,7 @@ spec: backoffLimitPerIndex: 12 completionMode: completionModeValue completions: 2 + managedBy: managedByValue manualSelector: true maxFailedIndexes: 13 parallelism: 1 diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.json b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.json index f5a5741578a5..b6f8275eab16 100644 --- 
a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.json +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.json @@ -1822,7 +1822,8 @@ "ttlSecondsAfterFinished": 8, "completionMode": "completionModeValue", "suspend": true, - "podReplacementPolicy": "podReplacementPolicyValue" + "podReplacementPolicy": "podReplacementPolicyValue", + "managedBy": "managedByValue" } }, "successfulJobsHistoryLimit": 6, diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.pb b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.pb index 2032e0b40a52..54a9b31ba986 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.pb and b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.yaml b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.yaml index 784d3c04dd91..bf36262f91a2 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.yaml @@ -74,6 +74,7 @@ spec: backoffLimitPerIndex: 12 completionMode: completionModeValue completions: 2 + managedBy: managedByValue manualSelector: true maxFailedIndexes: 13 parallelism: 1 diff --git a/staging/src/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go b/staging/src/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go index 3d46a3ecf9b1..491b3361594f 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go @@ -41,6 +41,7 @@ type JobSpecApplyConfiguration struct { CompletionMode *batchv1.CompletionMode `json:"completionMode,omitempty"` Suspend *bool `json:"suspend,omitempty"` PodReplacementPolicy *batchv1.PodReplacementPolicy `json:"podReplacementPolicy,omitempty"` + ManagedBy *string `json:"managedBy,omitempty"` } // JobSpecApplyConfiguration constructs a declarative configuration of the JobSpec type for use with @@ -160,3 +161,11 @@ func (b *JobSpecApplyConfiguration) WithPodReplacementPolicy(value batchv1.PodRe b.PodReplacementPolicy = &value return b } + +// WithManagedBy sets the ManagedBy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagedBy field is set to the value of the last call.
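+// +// A minimal usage sketch (illustrative only; the import alias batchv1ac and the +// controller name below are assumptions, not part of this change): +// +//	import batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1" +// +//	spec := batchv1ac.JobSpec(). +//		WithSuspend(true). +//		WithManagedBy("example.com/custom-job-controller")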
+func (b *JobSpecApplyConfiguration) WithManagedBy(value string) *JobSpecApplyConfiguration { + b.ManagedBy = &value + return b +} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go b/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go index 37589e6b385e..99c14e94121b 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -3599,6 +3599,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: completions type: scalar: numeric + - name: managedBy + type: + scalar: string - name: manualSelector type: scalar: boolean diff --git a/test/integration/job/job_test.go b/test/integration/job/job_test.go index c0b6be2fa201..e61a9e2b640a 100644 --- a/test/integration/job/job_test.go +++ b/test/integration/job/job_test.go @@ -1174,6 +1174,335 @@ func TestBackoffLimitPerIndex(t *testing.T) { } } +// TestManagedBy verifies that the Job controller correctly decides to +// reconcile or skip reconciliation of the Job depending on the Job's managedBy +// field and on whether the JobManagedBy feature gate is enabled. +func TestManagedBy(t *testing.T) { + customControllerName := "example.com/custom-job-controller" + podTemplateSpec := v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "main-container", + Image: "foo", + }, + }, + }, + } + testCases := map[string]struct { + enableJobManagedBy bool + job batchv1.Job + wantReconciledByBuiltInController bool + wantJobByExternalControllerTotalMetric metricLabelsWithValue + }{ + "the Job controller reconciles jobs without the managedBy field": { + enableJobManagedBy: true, + job: batchv1.Job{ + Spec: batchv1.JobSpec{ + Template: podTemplateSpec, + }, + }, + wantReconciledByBuiltInController: true, + wantJobByExternalControllerTotalMetric: metricLabelsWithValue{ + // There is no good label value choice to check here, since the + // value wasn't specified. Let's go with checking for the reserved + // value just so that all test cases verify the metric.
+ Labels: []string{batchv1.JobControllerName}, + Value: 0, + }, + }, + "the Job controller reconciles jobs with the well-known value of the managedBy field": { + enableJobManagedBy: true, + job: batchv1.Job{ + Spec: batchv1.JobSpec{ + Template: podTemplateSpec, + ManagedBy: ptr.To(batchv1.JobControllerName), + }, + }, + wantReconciledByBuiltInController: true, + wantJobByExternalControllerTotalMetric: metricLabelsWithValue{ + Labels: []string{batchv1.JobControllerName}, + Value: 0, + }, + }, + "the Job controller reconciles an unsuspended Job with the custom value of managedBy; feature disabled": { + enableJobManagedBy: false, + job: batchv1.Job{ + Spec: batchv1.JobSpec{ + Template: podTemplateSpec, + ManagedBy: ptr.To(customControllerName), + }, + }, + wantReconciledByBuiltInController: true, + wantJobByExternalControllerTotalMetric: metricLabelsWithValue{ + Labels: []string{customControllerName}, + Value: 0, + }, + }, + "the Job controller does not reconcile an unsuspended Job with the custom value of managedBy": { + enableJobManagedBy: true, + job: batchv1.Job{ + Spec: batchv1.JobSpec{ + Suspend: ptr.To(false), + Template: podTemplateSpec, + ManagedBy: ptr.To(customControllerName), + }, + }, + wantReconciledByBuiltInController: false, + wantJobByExternalControllerTotalMetric: metricLabelsWithValue{ + Labels: []string{customControllerName}, + Value: 1, + }, + }, + "the Job controller does not reconcile a suspended Job with the custom value of managedBy": { + enableJobManagedBy: true, + job: batchv1.Job{ + Spec: batchv1.JobSpec{ + Suspend: ptr.To(true), + Template: podTemplateSpec, + ManagedBy: ptr.To(customControllerName), + }, + }, + wantReconciledByBuiltInController: false, + wantJobByExternalControllerTotalMetric: metricLabelsWithValue{ + Labels: []string{customControllerName}, + Value: 1, + }, + }, + } + for name, test := range testCases { + t.Run(name, func(t *testing.T) { + resetMetrics() + defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobManagedBy, test.enableJobManagedBy)() + + closeFn, restConfig, clientSet, ns := setup(t, "managed-by") + defer closeFn() + ctx, cancel := startJobControllerAndWaitForCaches(t, restConfig) + defer cancel() + jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &test.job) + if err != nil { + t.Fatalf("Error %v while creating the job %q", err, klog.KObj(jobObj)) + } + + if test.wantReconciledByBuiltInController { + validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ + Active: int(*jobObj.Spec.Parallelism), + Ready: ptr.To[int32](0), + Terminating: ptr.To[int32](0), + }) + validateCounterMetric(ctx, t, metrics.JobByExternalControllerTotal, test.wantJobByExternalControllerTotalMetric) + } else { + validateCounterMetric(ctx, t, metrics.JobByExternalControllerTotal, test.wantJobByExternalControllerTotalMetric) + + // Wait for a little while to verify the reconciliation does not + // happen. We wait 100ms for the sync itself, because we already + // checked the metric is incremented so the sync would start + // immediately if it was queued.
+ time.Sleep(100 * time.Millisecond) + jobObj, err = clientSet.BatchV1().Jobs(jobObj.Namespace).Get(ctx, jobObj.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error %v when getting the latest job %v", err, klog.KObj(jobObj)) + } + if diff := cmp.Diff(batchv1.JobStatus{}, jobObj.Status); diff != "" { + t.Fatalf("Unexpected status (-want/+got): %s", diff) + } + } + }) + } +} + +// TestManagedBy_Reenabling verifies how the Job controller handles a Job with +// a custom value of the managedBy field as the JobManagedBy feature gate is +// disabled and re-enabled again. First, while the feature gate is enabled, the +// synchronization is skipped; when it is disabled, the synchronization starts; +// and it is skipped again once the feature gate is re-enabled. +func TestManagedBy_Reenabling(t *testing.T) { + customControllerName := "example.com/custom-job-controller" + defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobManagedBy, true)() + + closeFn, restConfig, clientSet, ns := setup(t, "managed-by-reenabling") + defer closeFn() + ctx, cancel := startJobControllerAndWaitForCaches(t, restConfig) + defer func() { + cancel() + }() + resetMetrics() + + baseJob := batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "custom-job-test", + Namespace: ns.Name, + }, + Spec: batchv1.JobSpec{ + Completions: ptr.To[int32](1), + Parallelism: ptr.To[int32](1), + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "main-container", + Image: "foo", + }, + }, + }, + }, + ManagedBy: &customControllerName, + }, + } + jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &baseJob) + if err != nil { + t.Fatalf("Error %v when creating the job %q", err, klog.KObj(jobObj)) + } + jobClient := clientSet.BatchV1().Jobs(jobObj.Namespace) + + validateCounterMetric(ctx, t, metrics.JobByExternalControllerTotal, metricLabelsWithValue{ + Labels: []string{customControllerName}, + Value: 1, + }) + + // Wait for a little while to verify the reconciliation does not happen. + // We wait 1s to account for queued sync delay plus 100ms for the sync itself.
+ time.Sleep(time.Second + 100*time.Millisecond) + jobObj, err = jobClient.Get(ctx, jobObj.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error %v when getting the latest job %v", err, klog.KObj(jobObj)) + } + if diff := cmp.Diff(batchv1.JobStatus{}, jobObj.Status); diff != "" { + t.Fatalf("Unexpected status (-want/+got): %s", diff) + } + + // Disable the feature gate and restart the controller + defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobManagedBy, false)() + cancel() + resetMetrics() + ctx, cancel = startJobControllerAndWaitForCaches(t, restConfig) + + // Verify the built-in controller reconciles the Job + validateJobsPodsStatusOnly(ctx, t, clientSet, jobObj, podsByStatus{ + Active: 1, + Ready: ptr.To[int32](0), + Terminating: ptr.To[int32](0), + }) + + validateCounterMetric(ctx, t, metrics.JobByExternalControllerTotal, metricLabelsWithValue{ + Labels: []string{customControllerName}, + Value: 0, + }) + + // Reenable the feature gate and restart the controller + defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobManagedBy, true)() + cancel() + resetMetrics() + ctx, cancel = startJobControllerAndWaitForCaches(t, restConfig) + + // Mark the pod as finished; with the feature gate re-enabled, the built-in + // controller should no longer react to it + if err, _ := setJobPodsPhase(ctx, clientSet, jobObj, v1.PodSucceeded, 1); err != nil { + t.Fatalf("Error %v when setting phase %s on the pod of job %v", err, v1.PodSucceeded, klog.KObj(jobObj)) + } + + // Wait for a little while to verify the reconciliation does not happen. + // We wait 1s to account for queued sync delay plus 100ms for the sync itself. + time.Sleep(time.Second + 100*time.Millisecond) + + validateCounterMetric(ctx, t, metrics.JobByExternalControllerTotal, metricLabelsWithValue{ + Labels: []string{customControllerName}, + Value: 1, + }) + + // Verify the built-in controller does not reconcile the Job. It is up to + // the external controller to update the status. + validateJobsPodsStatusOnly(ctx, t, clientSet, jobObj, podsByStatus{ + Active: 1, + Ready: ptr.To[int32](0), + Terminating: ptr.To[int32](0), + }) +} + +// TestManagedBy_RecreatedJob verifies that the Job controller skips +// reconciliation of a recreated job with the managedBy field when there is +// still a pending sync queued for the previous job. +// In this scenario we first create a job without the managedBy field, and we +// mark its pod as succeeded. This queues the Job object sync with a 1s delay. +// Then, without waiting for the Job status update, we delete and recreate the +// job under the same name, but with the managedBy field. The queued update +// starts to execute on the new job, but is skipped.
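+// +// For context, an external controller that owns such jobs would typically gate +// its own sync on the field with a check along these lines (an illustrative +// sketch only, not part of this test): +// +//	if job.Spec.ManagedBy == nil || *job.Spec.ManagedBy != customControllerName { +//		return nil // owned by another controller; skip the sync +//	}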
+func TestManagedBy_RecreatedJob(t *testing.T) { + customControllerName := "example.com/custom-job-controller" + defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobManagedBy, true)() + + closeFn, restConfig, clientSet, ns := setup(t, "managed-by-recreate-job") + defer closeFn() + ctx, cancel := startJobControllerAndWaitForCaches(t, restConfig) + defer cancel() + resetMetrics() + + baseJob := batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "custom-job-test", + Namespace: ns.Name, + }, + Spec: batchv1.JobSpec{ + Completions: ptr.To[int32](1), + Parallelism: ptr.To[int32](1), + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "main-container", + Image: "foo", + }, + }, + }, + }, + }, + } + jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &baseJob) + if err != nil { + t.Fatalf("Error %v when creating the job %q", err, klog.KObj(jobObj)) + } + validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ + Active: 1, + Ready: ptr.To[int32](0), + Terminating: ptr.To[int32](0), + }) + + // Marking the pod as complete queues the job reconciliation + if err, _ := setJobPodsPhase(ctx, clientSet, jobObj, v1.PodSucceeded, 1); err != nil { + t.Fatalf("Error %v when setting phase %s on the pod of job %v", err, v1.PodSucceeded, klog.KObj(jobObj)) + } + + jobClient := clientSet.BatchV1().Jobs(jobObj.Namespace) + if err = jobClient.Delete(ctx, jobObj.Name, metav1.DeleteOptions{ + // Use propagationPolicy=background so that we don't need to wait for the job object to be gone. + PropagationPolicy: ptr.To(metav1.DeletePropagationBackground), + }); err != nil { + t.Fatalf("Error %v when deleting the job %v", err, klog.KObj(jobObj)) + } + + jobWithManagedBy := baseJob.DeepCopy() + jobWithManagedBy.Spec.ManagedBy = ptr.To(customControllerName) + jobObj, err = createJobWithDefaults(ctx, clientSet, ns.Name, jobWithManagedBy) + if err != nil { + t.Fatalf("Error %v when creating the job %q", err, klog.KObj(jobObj)) + } + + validateCounterMetric(ctx, t, metrics.JobByExternalControllerTotal, metricLabelsWithValue{ + Labels: []string{customControllerName}, + Value: 1, + }) + + // Wait for a little while to verify the reconciliation does not happen. + // We wait 1s to account for queued sync delay plus 100ms for the sync itself. + time.Sleep(time.Second + 100*time.Millisecond) + jobObj, err = jobClient.Get(ctx, jobObj.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error %v when getting the latest job %v", err, klog.KObj(jobObj)) + } + if diff := cmp.Diff(batchv1.JobStatus{}, jobObj.Status); diff != "" { + t.Fatalf("Unexpected status (-want/+got): %s", diff) + } +} + func getIndexFailureCount(p *v1.Pod) (int, error) { if p.Annotations == nil { return 0, errors.New("no annotations found") @@ -3155,6 +3484,7 @@ func resetMetrics() { metrics.PodFailuresHandledByFailurePolicy.Reset() metrics.JobFinishedIndexesTotal.Reset() metrics.JobPodsCreationTotal.Reset() + metrics.JobByExternalControllerTotal.Reset() } func createJobControllerWithSharedInformers(tb testing.TB, restConfig *restclient.Config, informerSet informers.SharedInformerFactory) (*jobcontroller.Controller, context.Context, context.CancelFunc) {