diff --git a/pkg/cleaning/allow_list/kubernetes.go b/pkg/cleaning/allow_list/kubernetes.go
index d0958e8fd2..dc7dbd10b8 100644
--- a/pkg/cleaning/allow_list/kubernetes.go
+++ b/pkg/cleaning/allow_list/kubernetes.go
@@ -13,70 +13,93 @@ import (
 	"github.com/werf/logboek"
 )
 
-func DeployedDockerImages(ctx context.Context, kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]string, error) {
-	var deployedDockerImages []string
+type DeployedImage struct {
+	Name           string
+	ResourcesNames []string
+}
+
+func AppendDeployedImages(deployedImages []*DeployedImage, newDeployedImages ...*DeployedImage) (res []*DeployedImage) {
+	for _, desc := range deployedImages {
+		res = append(res, &DeployedImage{
+			Name:           desc.Name,
+			ResourcesNames: desc.ResourcesNames,
+		})
+	}
+
+AppendNewImages:
+	for _, newDesc := range newDeployedImages {
+		for _, desc := range res {
+			if desc.Name == newDesc.Name {
+				desc.ResourcesNames = append(desc.ResourcesNames, newDesc.ResourcesNames...)
+				continue AppendNewImages
+			}
+		}
+
+		res = append(res, &DeployedImage{
+			Name:           newDesc.Name,
+			ResourcesNames: newDesc.ResourcesNames,
+		})
+	}
+
+	return
+}
+
+func DeployedDockerImages(ctx context.Context, kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]*DeployedImage, error) {
+	var deployedDockerImages []*DeployedImage
 
 	images, err := getPodsImages(kubernetesClient, kubernetesNamespace)
 	if err != nil {
 		return nil, fmt.Errorf("cannot get Pods images: %w", err)
 	}
-
-	deployedDockerImages = append(deployedDockerImages, images...)
+	deployedDockerImages = AppendDeployedImages(deployedDockerImages, images...)
 
 	images, err = getReplicationControllersImages(kubernetesClient, kubernetesNamespace)
 	if err != nil {
 		return nil, fmt.Errorf("cannot get ReplicationControllers images: %w", err)
 	}
-
-	deployedDockerImages = append(deployedDockerImages, images...)
+	deployedDockerImages = AppendDeployedImages(deployedDockerImages, images...)
 
 	images, err = getDeploymentsImages(kubernetesClient, kubernetesNamespace)
 	if err != nil {
 		return nil, fmt.Errorf("cannot get Deployments images: %w", err)
 	}
-
-	deployedDockerImages = append(deployedDockerImages, images...)
+	deployedDockerImages = AppendDeployedImages(deployedDockerImages, images...)
 
 	images, err = getStatefulSetsImages(kubernetesClient, kubernetesNamespace)
 	if err != nil {
 		return nil, fmt.Errorf("cannot get StatefulSets images: %w", err)
 	}
-
-	deployedDockerImages = append(deployedDockerImages, images...)
+	deployedDockerImages = AppendDeployedImages(deployedDockerImages, images...)
 
 	images, err = getDaemonSetsImages(kubernetesClient, kubernetesNamespace)
 	if err != nil {
 		return nil, fmt.Errorf("cannot get DaemonSets images: %w", err)
 	}
-
-	deployedDockerImages = append(deployedDockerImages, images...)
+	deployedDockerImages = AppendDeployedImages(deployedDockerImages, images...)
 
 	images, err = getReplicaSetsImages(kubernetesClient, kubernetesNamespace)
 	if err != nil {
 		return nil, fmt.Errorf("cannot get ReplicaSets images: %w", err)
 	}
-
-	deployedDockerImages = append(deployedDockerImages, images...)
+	deployedDockerImages = AppendDeployedImages(deployedDockerImages, images...)
 
 	images, err = getCronJobsImages(ctx, kubernetesClient, kubernetesNamespace)
 	if err != nil {
 		return nil, fmt.Errorf("cannot get CronJobs images: %w", err)
 	}
-
-	deployedDockerImages = append(deployedDockerImages, images...)
+	deployedDockerImages = AppendDeployedImages(deployedDockerImages, images...)
 
 	images, err = getJobsImages(ctx, kubernetesClient, kubernetesNamespace)
 	if err != nil {
 		return nil, fmt.Errorf("cannot get Jobs images: %w", err)
 	}
-
-	deployedDockerImages = append(deployedDockerImages, images...)
+	deployedDockerImages = AppendDeployedImages(deployedDockerImages, images...)
 
 	return deployedDockerImages, nil
 }
 
-func getPodsImages(kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]string, error) {
-	var images []string
+func getPodsImages(kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]*DeployedImage, error) {
+	var images []*DeployedImage
 	list, err := kubernetesClient.CoreV1().Pods(kubernetesNamespace).List(context.Background(), metav1.ListOptions{})
 	if err != nil {
 		return nil, err
@@ -87,15 +110,18 @@ func getPodsImages(kubernetesClient kubernetes.Interface, kubernetesNamespace st
 			pod.Spec.Containers,
 			pod.Spec.InitContainers...,
 		) {
-			images = append(images, container.Image)
+			images = AppendDeployedImages(images, &DeployedImage{
+				Name:           container.Image,
+				ResourcesNames: []string{fmt.Sprintf("ns/%s pod/%s container/%s", pod.Namespace, pod.Name, container.Name)},
+			})
 		}
 	}
 
 	return images, nil
 }
 
-func getReplicationControllersImages(kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]string, error) {
-	var images []string
+func getReplicationControllersImages(kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]*DeployedImage, error) {
+	var images []*DeployedImage
 	list, err := kubernetesClient.CoreV1().ReplicationControllers(kubernetesNamespace).List(context.Background(), metav1.ListOptions{})
 	if err != nil {
 		return nil, err
@@ -106,15 +132,18 @@ func getReplicationControllersImages(kubernetesClient kubernetes.Interface, kube
 			replicationController.Spec.Template.Spec.Containers,
 			replicationController.Spec.Template.Spec.InitContainers...,
 		) {
-			images = append(images, container.Image)
+			images = AppendDeployedImages(images, &DeployedImage{
+				Name:           container.Image,
+				ResourcesNames: []string{fmt.Sprintf("ns/%s rc/%s container/%s", replicationController.Namespace, replicationController.Name, container.Name)},
+			})
 		}
 	}
 
 	return images, nil
 }
 
-func getDeploymentsImages(kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]string, error) {
-	var images []string
+func getDeploymentsImages(kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]*DeployedImage, error) {
+	var images []*DeployedImage
 	list, err := kubernetesClient.AppsV1().Deployments(kubernetesNamespace).List(context.Background(), metav1.ListOptions{})
 	if err != nil {
 		return nil, err
@@ -125,15 +154,18 @@ func getDeploymentsImages(kubernetesClient kubernetes.Interface, kubernetesNames
 			deployment.Spec.Template.Spec.Containers,
 			deployment.Spec.Template.Spec.InitContainers...,
 		) {
-			images = append(images, container.Image)
+			images = AppendDeployedImages(images, &DeployedImage{
+				Name:           container.Image,
+				ResourcesNames: []string{fmt.Sprintf("ns/%s deploy/%s container/%s", deployment.Namespace, deployment.Name, container.Name)},
+			})
 		}
 	}
 
 	return images, nil
 }
 
-func getStatefulSetsImages(kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]string, error) {
-	var images []string
+func getStatefulSetsImages(kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]*DeployedImage, error) {
+	var images []*DeployedImage
 	list, err := kubernetesClient.AppsV1().StatefulSets(kubernetesNamespace).List(context.Background(), metav1.ListOptions{})
 	if err != nil {
 		return nil, err
@@ -144,34 +176,40 @@ func getStatefulSetsImages(kubernetesClient kubernetes.Interface, kubernetesName
 			statefulSet.Spec.Template.Spec.Containers,
 			statefulSet.Spec.Template.Spec.InitContainers...,
 		) {
-			images = append(images, container.Image)
+			images = AppendDeployedImages(images, &DeployedImage{
+				Name:           container.Image,
+				ResourcesNames: []string{fmt.Sprintf("ns/%s sts/%s container/%s", statefulSet.Namespace, statefulSet.Name, container.Name)},
+			})
 		}
 	}
 
 	return images, nil
 }
 
-func getDaemonSetsImages(kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]string, error) {
-	var images []string
+func getDaemonSetsImages(kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]*DeployedImage, error) {
+	var images []*DeployedImage
 	list, err := kubernetesClient.AppsV1().DaemonSets(kubernetesNamespace).List(context.Background(), metav1.ListOptions{})
 	if err != nil {
 		return nil, err
 	}
 
-	for _, daemonSets := range list.Items {
+	for _, daemonSet := range list.Items {
 		for _, container := range append(
-			daemonSets.Spec.Template.Spec.Containers,
-			daemonSets.Spec.Template.Spec.InitContainers...,
+			daemonSet.Spec.Template.Spec.Containers,
+			daemonSet.Spec.Template.Spec.InitContainers...,
 		) {
-			images = append(images, container.Image)
+			images = AppendDeployedImages(images, &DeployedImage{
+				Name:           container.Image,
+				ResourcesNames: []string{fmt.Sprintf("ns/%s ds/%s container/%s", daemonSet.Namespace, daemonSet.Name, container.Name)},
+			})
 		}
 	}
 
 	return images, nil
 }
 
-func getReplicaSetsImages(kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]string, error) {
-	var images []string
+func getReplicaSetsImages(kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]*DeployedImage, error) {
+	var images []*DeployedImage
 	list, err := kubernetesClient.AppsV1().ReplicaSets(kubernetesNamespace).List(context.Background(), metav1.ListOptions{})
 	if err != nil {
 		return nil, err
@@ -182,14 +220,17 @@ func getReplicaSetsImages(kubernetesClient kubernetes.Interface, kubernetesNames
 			replicaSet.Spec.Template.Spec.Containers,
 			replicaSet.Spec.Template.Spec.InitContainers...,
 		) {
-			images = append(images, container.Image)
+			images = AppendDeployedImages(images, &DeployedImage{
+				Name:           container.Image,
+				ResourcesNames: []string{fmt.Sprintf("ns/%s rs/%s container/%s", replicaSet.Namespace, replicaSet.Name, container.Name)},
+			})
 		}
 	}
 
 	return images, nil
 }
 
-func getCronJobsImages(ctx context.Context, kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]string, error) {
+func getCronJobsImages(ctx context.Context, kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]*DeployedImage, error) {
 	images, err := getCronJobsImagesBatchV1(kubernetesClient, kubernetesNamespace)
 	if apierrors.IsNotFound(err) {
 		logboek.Context(ctx).Warn().LogF("\n")
@@ -203,8 +244,8 @@ func getCronJobsImages(ctx context.Context, kubernetesClient kubernetes.Interfac
 	return images, err
 }
 
-func getCronJobsImagesBatchV1(kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]string, error) {
-	var images []string
+func getCronJobsImagesBatchV1(kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]*DeployedImage, error) {
+	var images []*DeployedImage
 
 	list, err := kubernetesClient.BatchV1().CronJobs(kubernetesNamespace).List(context.Background(), metav1.ListOptions{})
 	if err != nil {
@@ -216,15 +257,18 @@ func getCronJobsImagesBatchV1(kubernetesClient kubernetes.Interface, kubernetesN
 			cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers,
 			cronJob.Spec.JobTemplate.Spec.Template.Spec.InitContainers...,
 		) {
-			images = append(images, container.Image)
+			images = AppendDeployedImages(images, &DeployedImage{
+				Name:           container.Image,
+				ResourcesNames: []string{fmt.Sprintf("ns/%s cronjob/%s container/%s", cronJob.Namespace, cronJob.Name, container.Name)},
+			})
 		}
 	}
 
 	return images, nil
 }
 
-func getCronJobsImagesBatchV1beta1(kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]string, error) {
-	var images []string
+func getCronJobsImagesBatchV1beta1(kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]*DeployedImage, error) {
+	var images []*DeployedImage
 
 	list, err := kubernetesClient.BatchV1beta1().CronJobs(kubernetesNamespace).List(context.Background(), metav1.ListOptions{})
 	if err != nil {
@@ -236,15 +280,18 @@ func getCronJobsImagesBatchV1beta1(kubernetesClient kubernetes.Interface, kubern
 			cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers,
 			cronJob.Spec.JobTemplate.Spec.Template.Spec.InitContainers...,
 		) {
-			images = append(images, container.Image)
+			images = AppendDeployedImages(images, &DeployedImage{
+				Name:           container.Image,
+				ResourcesNames: []string{fmt.Sprintf("ns/%s cronjob/%s container/%s", cronJob.Namespace, cronJob.Name, container.Name)},
+			})
 		}
 	}
 
 	return images, nil
 }
 
-func getJobsImages(ctx context.Context, kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]string, error) {
-	var images []string
+func getJobsImages(ctx context.Context, kubernetesClient kubernetes.Interface, kubernetesNamespace string) ([]*DeployedImage, error) {
+	var images []*DeployedImage
 	list, err := kubernetesClient.BatchV1().Jobs(kubernetesNamespace).List(ctx, metav1.ListOptions{})
 	if err != nil {
 		return nil, err
@@ -256,12 +303,12 @@ FindActiveJobs:
 			switch c.Type {
 			case batchv1.JobComplete:
 				if c.Status == corev1.ConditionTrue {
-					logboek.Context(ctx).Debug().LogF("Ignore complete job/%s: images in this resource are not used anymore and can be safely removed\n", job.Name)
+					logboek.Context(ctx).Info().LogF("Ignore complete job/%s: images in this resource are not used anymore and can be safely removed\n", job.Name)
 					continue FindActiveJobs
 				}
 			case batchv1.JobFailed:
 				if c.Status == corev1.ConditionTrue {
-					logboek.Context(ctx).Debug().LogF("Ignore failed job/%s: images in this resource are not used anymore and can be safely removed\n", job.Name)
+					logboek.Context(ctx).Info().LogF("Ignore failed job/%s: images in this resource are not used anymore and can be safely removed\n", job.Name)
 					continue FindActiveJobs
 				}
 			}
@@ -271,7 +318,10 @@ FindActiveJobs:
 			job.Spec.Template.Spec.Containers,
 			job.Spec.Template.Spec.InitContainers...,
 		) {
-			images = append(images, container.Image)
+			images = AppendDeployedImages(images, &DeployedImage{
+				Name:           container.Image,
+				ResourcesNames: []string{fmt.Sprintf("ns/%s job/%s container/%s", job.Namespace, job.Name, container.Name)},
+			})
 		}
 	}
 
diff --git a/pkg/cleaning/cleanup.go b/pkg/cleaning/cleanup.go
index 5f4471c9b1..887f72ddb3 100644
--- a/pkg/cleaning/cleanup.go
+++ b/pkg/cleaning/cleanup.go
@@ -122,19 +122,19 @@ func (m *cleanupManager) run(ctx context.Context) error {
 		return fmt.Errorf("no kubernetes configs found to skip images being used in the Kubernetes, pass --without-kube option (or WERF_WITHOUT_KUBE env var) to suppress this error")
 	}
 
-	deployedDockerImagesNames, err := m.deployedDockerImagesNames(ctx)
+	deployedDockerImages, err := m.deployedDockerImages(ctx)
 	if err != nil {
 		return fmt.Errorf("error getting deployed docker images names from Kubernetes: %w", err)
 	}
 
 	if err := logboek.Context(ctx).LogProcess("Skipping repo tags that are being used in Kubernetes").DoError(func() error {
-		return m.skipStageIDsThatAreUsedInKubernetes(ctx, deployedDockerImagesNames)
+		return m.skipStageIDsThatAreUsedInKubernetes(ctx, deployedDockerImages)
 	}); err != nil {
 		return err
 	}
 
 	if err := logboek.Context(ctx).LogProcess("Skipping final repo tags that are being used in Kubernetes").DoError(func() error {
-		return m.skipFinalStageIDsThatAreUsedInKubernetes(ctx, deployedDockerImagesNames)
+		return m.skipFinalStageIDsThatAreUsedInKubernetes(ctx, deployedDockerImages)
 	}); err != nil {
 		return err
 	}
@@ -165,16 +165,24 @@ func (m *cleanupManager) run(ctx context.Context) error {
 	return nil
 }
 
-func (m *cleanupManager) skipStageIDsThatAreUsedInKubernetes(ctx context.Context, deployedDockerImagesNames []string) error {
+func (m *cleanupManager) skipStageIDsThatAreUsedInKubernetes(ctx context.Context, deployedDockerImages []*DeployedDockerImage) error {
 	handledDeployedStages := map[string]bool{}
 	handleTagFunc := func(tag, stageID string, f func()) {
 		dockerImageName := fmt.Sprintf("%s:%s", m.StorageManager.GetStagesStorage().Address(), tag)
-		for _, deployedDockerImageName := range deployedDockerImagesNames {
-			if deployedDockerImageName == dockerImageName {
+		for _, deployedDockerImage := range deployedDockerImages {
+			if deployedDockerImage.Name == dockerImageName {
 				if !handledDeployedStages[stageID] {
 					f()
 
-					logboek.Context(ctx).Default().LogFDetails("  tag: %s\n", tag)
+					logboek.Context(ctx).Default().LogFDetails("tag: %s\n", tag)
+					logboek.Context(ctx).Default().LogBlock("used by resources").Do(func() {
+						for _, cr := range deployedDockerImage.ContextResources {
+							for _, r := range cr.ResourcesNames {
+								logboek.Context(ctx).Default().LogF("ctx/%s %s\n", cr.ContextName, r)
+							}
+						}
+					})
+					logboek.Context(ctx).LogOptionalLn()
 					handledDeployedStages[stageID] = true
 				}
 
@@ -186,7 +194,7 @@ func (m *cleanupManager) skipStageIDsThatAreUsedInKubernetes(ctx context.Context
 
 	for _, stageID := range m.stageManager.GetStageIDList() {
 		handleTagFunc(stageID, stageID, func() {
-			m.stageManager.MarkStageAsProtected(stageID)
+			m.stageManager.MarkStageAsProtected(stageID, "used in the Kubernetes")
 		})
 	}
 
@@ -195,7 +203,7 @@ func (m *cleanupManager) skipStageIDsThatAreUsedInKubernetes(ctx context.Context
 		handleTagFunc(customTag, stageID, func() {
 			if m.stageManager.IsStageExist(stageID) {
 				// keep existent stage and associated custom tags
-				m.stageManager.MarkStageAsProtected(stageID)
+				m.stageManager.MarkStageAsProtected(stageID, "used in the Kubernetes")
 			} else {
 				// keep custom tags that do not have associated existent stage
 				m.stageManager.ForgetCustomTagsByStageID(stageID)
@@ -207,16 +215,16 @@ func (m *cleanupManager) skipStageIDsThatAreUsedInKubernetes(ctx context.Context
 	return nil
 }
 
-func (m *cleanupManager) skipFinalStageIDsThatAreUsedInKubernetes(ctx context.Context, deployedDockerImagesNames []string) error {
+func (m *cleanupManager) skipFinalStageIDsThatAreUsedInKubernetes(ctx context.Context, deployedDockerImages []*DeployedDockerImage) error {
 	handledDeployedFinalStages := map[string]bool{}
 
 Loop:
 	for _, stageID := range m.stageManager.GetFinalStageIDList() {
 		dockerImageName := fmt.Sprintf("%s:%s", m.StorageManager.GetFinalStagesStorage().Address(), stageID)
-		for _, deployedDockerImageName := range deployedDockerImagesNames {
-			if deployedDockerImageName == dockerImageName {
+		for _, deployedDockerImage := range deployedDockerImages {
+			if deployedDockerImage.Name == dockerImageName {
 				if !handledDeployedFinalStages[stageID] {
-					m.stageManager.MarkFinalStageAsProtected(stageID)
+					m.stageManager.MarkFinalStageAsProtected(stageID, "used in the Kubernetes")
 
 					logboek.Context(ctx).Default().LogFDetails("  tag: %s\n", stageID)
 					logboek.Context(ctx).LogOptionalLn()
@@ -231,17 +239,68 @@ Loop:
 	return nil
 }
 
-func (m *cleanupManager) deployedDockerImagesNames(ctx context.Context) ([]string, error) {
-	var deployedDockerImagesNames []string
+type DeployedDockerImage struct {
+	Name             string
+	ContextResources []*ContextResources
+}
+
+type ContextResources struct {
+	ContextName    string
+	ResourcesNames []string
+}
+
+func AppendContextDeployedDockerImages(list []*DeployedDockerImage, contextName string, images []*allow_list.DeployedImage) (res []*DeployedDockerImage) {
+	for _, desc := range list {
+		res = append(res, &DeployedDockerImage{
+			Name:             desc.Name,
+			ContextResources: desc.ContextResources,
+		})
+	}
+
+AppendNewImages:
+	for _, i := range images {
+		for _, desc := range res {
+			if desc.Name == i.Name {
+				for _, contextResources := range desc.ContextResources {
+					if contextResources.ContextName == contextName {
+						contextResources.ResourcesNames = append(contextResources.ResourcesNames, i.ResourcesNames...)
+						continue AppendNewImages
+					}
+				}
+
+				desc.ContextResources = append(desc.ContextResources, &ContextResources{
+					ContextName:    contextName,
+					ResourcesNames: i.ResourcesNames,
+				})
+				continue AppendNewImages
+			}
+		}
+
+		res = append(res, &DeployedDockerImage{
+			Name: i.Name,
+			ContextResources: []*ContextResources{
+				{
+					ContextName:    contextName,
+					ResourcesNames: i.ResourcesNames,
+				},
+			},
+		})
+	}
+
+	return
+}
+
+func (m *cleanupManager) deployedDockerImages(ctx context.Context) ([]*DeployedDockerImage, error) {
+	var deployedDockerImages []*DeployedDockerImage
 	for _, contextClient := range m.KubernetesContextClients {
 		if err := logboek.Context(ctx).LogProcessInline("Getting deployed docker images (context %s)", contextClient.ContextName).
 			DoError(func() error {
-				kubernetesClientDeployedDockerImagesNames, err := allow_list.DeployedDockerImages(ctx, contextClient.Client, m.KubernetesNamespaceRestrictionByContext[contextClient.ContextName])
+				contextDeployedImages, err := allow_list.DeployedDockerImages(ctx, contextClient.Client, m.KubernetesNamespaceRestrictionByContext[contextClient.ContextName])
 				if err != nil {
 					return fmt.Errorf("cannot get deployed imagesStageList: %w", err)
 				}
 
-				deployedDockerImagesNames = append(deployedDockerImagesNames, kubernetesClientDeployedDockerImagesNames...)
+				deployedDockerImages = AppendContextDeployedDockerImages(deployedDockerImages, contextClient.ContextName, contextDeployedImages)
 
 				return nil
 			}); err != nil {
@@ -249,7 +308,7 @@ func (m *cleanupManager) deployedDockerImagesNames(ctx context.Context) ([]strin
 		}
 	}
 
-	return deployedDockerImagesNames, nil
+	return deployedDockerImages, nil
 }
 
 func (m *cleanupManager) gitHistoryBasedCleanup(ctx context.Context) error {
@@ -395,7 +454,7 @@ func (m *cleanupManager) prepareStageIDTableRows(ctx context.Context, stageIDCus
 func (m *cleanupManager) handleSavedStageIDs(ctx context.Context, savedStageIDs []string) {
 	logboek.Context(ctx).Default().LogBlock("Saved tags").Do(func() {
 		for _, stageID := range savedStageIDs {
-			m.stageManager.MarkStageAsProtected(stageID)
+			m.stageManager.MarkStageAsProtected(stageID, "found in the git history")
 			logboek.Context(ctx).Default().LogFDetails("  tag: %s\n", stageID)
 			logboek.Context(ctx).LogOptionalLn()
 		}
@@ -610,17 +669,36 @@ func (m *cleanupManager) cleanupUnusedStages(ctx context.Context) error {
 	// skip stages and their relatives covered by Kubernetes- or git history-based cleanup policies
 	stageDescriptionListToDelete := stageDescriptionList
 	{
-		var excludedSDList []*image.StageDescription
-		for _, sd := range m.stageManager.GetProtectedStageDescriptionList() {
-			var excludedSDListBySD []*image.StageDescription
-			stageDescriptionListToDelete, excludedSDListBySD = m.excludeStageAndRelativesByImageID(stageDescriptionListToDelete, sd.Info.ID)
-			excludedSDList = append(excludedSDList, excludedSDListBySD...)
+		excludedSDListByReason := make(map[string][]*image.StageDescription)
+
+		for reason, sdList := range m.stageManager.GetProtectedStageDescriptionListByReason() {
+			for _, sd := range sdList {
+				var excludedSDListBySD []*image.StageDescription
+				stageDescriptionListToDelete, excludedSDListBySD = m.excludeStageAndRelativesByImageID(stageDescriptionListToDelete, sd.Info.ID)
+
+				for _, exclSD := range excludedSDListBySD {
+					if sd.Info.Name == exclSD.Info.Name {
+						excludedSDListByReason[reason] = append(excludedSDListByReason[reason], exclSD)
+					} else {
+						ancestorReason := fmt.Sprintf("ancestors of images %s", reason)
+						excludedSDListByReason[ancestorReason] = append(excludedSDListByReason[ancestorReason], exclSD)
+					}
+				}
+			}
 		}
 
-		logboek.Context(ctx).Default().LogBlock("Saved stages (%d/%d)", len(excludedSDList), len(stageDescriptionList)).Do(func() {
-			for _, excludedSD := range excludedSDList {
-				logboek.Context(ctx).Default().LogFDetails("  tag: %s\n", excludedSD.Info.Tag)
-				logboek.Context(ctx).Default().LogOptionalLn()
+		excludedCount := 0
+		for _, list := range excludedSDListByReason {
+			excludedCount += len(list)
+		}
+
+		logboek.Context(ctx).Default().LogBlock("Saved stages (%d/%d)", excludedCount, len(stageDescriptionList)).Do(func() {
+			for reason, list := range excludedSDListByReason {
+				logboek.Context(ctx).Default().LogProcess("%s (%d)", reason, len(list)).Do(func() {
+					for _, excludedSD := range list {
+						logboek.Context(ctx).Default().LogFDetails("%s\n", excludedSD.Info.Tag)
+					}
+				})
 			}
 		})
 	}
diff --git a/pkg/cleaning/stage_manager/manager.go b/pkg/cleaning/stage_manager/manager.go
index a1d81eda1c..c22631668b 100644
--- a/pkg/cleaning/stage_manager/manager.go
+++ b/pkg/cleaning/stage_manager/manager.go
@@ -26,9 +26,10 @@ func NewManager() Manager {
 }
 
 type stage struct {
-	stageID     string
-	description *image.StageDescription
-	isProtected bool
+	stageID          string
+	description      *image.StageDescription
+	isProtected      bool
+	protectionReason string
 }
 
 func newStage(stageID string, description *image.StageDescription) *stage {
@@ -184,12 +185,14 @@ func (m *Manager) GetFinalStageIDList() []string {
 	return result
 }
 
-func (m *Manager) MarkStageAsProtected(stageID string) {
+func (m *Manager) MarkStageAsProtected(stageID, reason string) {
 	m.stages[stageID].isProtected = true
+	m.stages[stageID].protectionReason = reason
 }
 
-func (m *Manager) MarkFinalStageAsProtected(stageID string) {
+func (m *Manager) MarkFinalStageAsProtected(stageID, reason string) {
 	m.finalStages[stageID].isProtected = true
+	m.finalStages[stageID].protectionReason = reason
 }
 
 // GetImageStageIDCommitListToCleanup method returns existing stage IDs and related existing commits (for each managed image)
@@ -328,15 +331,17 @@ func (m *Manager) GetFinalStageDescriptionList(opts StageDescriptionListOptions)
 	return getStageDescriptionList(m.finalStages, opts)
 }
 
-func (m *Manager) GetProtectedStageDescriptionList() []*image.StageDescription {
-	var result []*image.StageDescription
+func (m *Manager) GetProtectedStageDescriptionListByReason() map[string][]*image.StageDescription {
+	res := make(map[string][]*image.StageDescription)
+
 	for _, stage := range m.stages {
-		if stage.isProtected {
-			result = append(result, stage.description)
+		if !stage.isProtected {
+			continue
 		}
+		res[stage.protectionReason] = append(res[stage.protectionReason], stage.description)
 	}
 
-	return result
+	return res
 }
 
 func (m *Manager) IsStageExist(stageID string) bool {
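
Editor's note: the standalone Go sketch below is not part of the diff above; it only illustrates the merge semantics that the new AppendDeployedImages helper in pkg/cleaning/allow_list/kubernetes.go appears to introduce. Repeated occurrences of the same image name are collapsed into a single DeployedImage whose ResourcesNames accumulates every referencing resource, which is what lets the cleanup report list all Kubernetes resources using a given tag. The image and resource names here are hypothetical.

// Standalone sketch; the type and helper mirror the ones added in the diff,
// redeclared locally so the example compiles on its own.
package main

import "fmt"

type DeployedImage struct {
	Name           string
	ResourcesNames []string
}

// appendDeployedImages copies the existing list and then merges new entries:
// a duplicate image name extends ResourcesNames instead of adding a new item.
func appendDeployedImages(deployedImages []*DeployedImage, newDeployedImages ...*DeployedImage) (res []*DeployedImage) {
	for _, desc := range deployedImages {
		res = append(res, &DeployedImage{Name: desc.Name, ResourcesNames: desc.ResourcesNames})
	}

AppendNewImages:
	for _, newDesc := range newDeployedImages {
		for _, desc := range res {
			if desc.Name == newDesc.Name {
				desc.ResourcesNames = append(desc.ResourcesNames, newDesc.ResourcesNames...)
				continue AppendNewImages
			}
		}
		res = append(res, &DeployedImage{Name: newDesc.Name, ResourcesNames: newDesc.ResourcesNames})
	}
	return
}

func main() {
	images := appendDeployedImages(nil,
		&DeployedImage{Name: "registry.example.com/app:v1", ResourcesNames: []string{"ns/prod deploy/app container/app"}},
		&DeployedImage{Name: "registry.example.com/app:v1", ResourcesNames: []string{"ns/prod rs/app-7c9 container/app"}},
		&DeployedImage{Name: "registry.example.com/app:v2", ResourcesNames: []string{"ns/stage deploy/app container/app"}},
	)

	for _, img := range images {
		fmt.Println(img.Name, img.ResourcesNames)
	}
	// Output:
	// registry.example.com/app:v1 [ns/prod deploy/app container/app ns/prod rs/app-7c9 container/app]
	// registry.example.com/app:v2 [ns/stage deploy/app container/app]
}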