/
local_docker_server.go
545 lines (440 loc) · 18.5 KB
/
local_docker_server.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
package host_cleaning
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/dustin/go-humanize"
"github.com/werf/lockgate"
"github.com/werf/logboek"
"github.com/werf/werf/pkg/container_runtime"
"github.com/werf/werf/pkg/docker"
"github.com/werf/werf/pkg/image"
"github.com/werf/werf/pkg/storage/lrumeta"
"github.com/werf/werf/pkg/volumeutils"
"github.com/werf/werf/pkg/werf"
"github.com/werf/kubedog/pkg/utils"
)
const (
	// MinImagesToDelete is the minimum number of least-recently-used images a
	// single GC pass tries to remove before it starts checking whether enough
	// bytes have been freed (see the freedImagesCount check in
	// RunGCForLocalDockerServer).
	MinImagesToDelete = 10
)
// GetLocalDockerServerStoragePath detects the storage directory of the local
// docker server based on `docker info`. For Docker Desktop the VM data
// directory is used (known locations exist only for windows and darwin);
// for all other servers DockerRootDir is returned.
//
// An empty string with a nil error means the path could not be determined or
// does not exist on this host — callers treat that as "nothing to clean".
func GetLocalDockerServerStoragePath(ctx context.Context) (string, error) {
	dockerInfo, err := docker.Info(ctx)
	if err != nil {
		return "", fmt.Errorf("unable to get docker info: %w", err)
	}

	var storagePath string
	if dockerInfo.OperatingSystem == "Docker Desktop" {
		switch runtime.GOOS {
		case "windows":
			storagePath = filepath.Join(os.Getenv("HOMEDRIVE"), `\\ProgramData\DockerDesktop\vm-data\`)
		case "darwin":
			storagePath = filepath.Join(os.Getenv("HOME"), "Library/Containers/com.docker.docker/Data")
		}
	} else {
		storagePath = dockerInfo.DockerRootDir
	}

	// No known storage location for this platform (e.g. Docker Desktop on an
	// unsupported GOOS): report "no path" explicitly instead of relying on
	// os.Stat("") failing with a not-exist error.
	if storagePath == "" {
		return "", nil
	}

	if _, err := os.Stat(storagePath); os.IsNotExist(err) {
		return "", nil
	} else if err != nil {
		return "", fmt.Errorf("error accessing %q: %w", storagePath, err)
	}

	return storagePath, nil
}
// getDockerServerStoragePath returns the explicitly configured storage path
// option when it is non-empty, otherwise falls back to autodetection via
// GetLocalDockerServerStoragePath.
func getDockerServerStoragePath(ctx context.Context, dockerServerStoragePathOption string) (string, error) {
	if dockerServerStoragePathOption != "" {
		return dockerServerStoragePathOption, nil
	}
	return GetLocalDockerServerStoragePath(ctx)
}
// ShouldRunAutoGCForLocalDockerServer reports whether the volume holding
// dockerServerStoragePath is filled above allowedVolumeUsagePercentage.
// An empty storage path disables the check (returns false, nil).
func ShouldRunAutoGCForLocalDockerServer(ctx context.Context, allowedVolumeUsagePercentage float64, dockerServerStoragePath string) (bool, error) {
	if dockerServerStoragePath == "" {
		// Nothing to measure: storage path autodetection found nothing.
		return false, nil
	}

	usage, err := volumeutils.GetVolumeUsageByPath(ctx, dockerServerStoragePath)
	if err != nil {
		return false, fmt.Errorf("error getting volume usage by path %q: %s", dockerServerStoragePath, err)
	}

	exceeded := usage.Percentage > allowedVolumeUsagePercentage
	return exceeded, nil
}
// LocalDockerServerStorageCheckResult is the snapshot produced by
// GetLocalDockerServerStorageCheck: current volume usage of the docker
// storage path plus the werf-created images eligible for cleanup.
type LocalDockerServerStorageCheckResult struct {
	VolumeUsage      volumeutils.VolumeUsage // usage stats of the volume holding the docker storage path
	TotalImagesBytes uint64                  // sum of per-image unique sizes (VirtualSize - SharedSize) over ImagesDescs
	ImagesDescs      []*LocalImageDesc       // cleanup candidates, sorted least-recently-used first
}
// GetBytesToFree calculates how many bytes must be freed to bring the volume
// usage down to targetVolumeUsage percent.
//
// Returns 0 when current usage is already at or below the target. Previously
// the negative percentage difference was converted straight to uint64, which
// is an out-of-range float-to-integer conversion with an
// implementation-dependent result in Go.
func (checkResult *LocalDockerServerStorageCheckResult) GetBytesToFree(targetVolumeUsage float64) uint64 {
	percentageToFree := checkResult.VolumeUsage.Percentage - targetVolumeUsage
	if percentageToFree <= 0 {
		return 0
	}
	return uint64((float64(checkResult.VolumeUsage.TotalBytes) / 100.0) * percentageToFree)
}
// GetLocalDockerServerStorageCheck measures the volume usage of the docker
// storage path and gathers all werf-created docker images that are candidates
// for cleanup, sorted least-recently-used first.
func GetLocalDockerServerStorageCheck(ctx context.Context, dockerServerStoragePath string) (*LocalDockerServerStorageCheckResult, error) {
	res := &LocalDockerServerStorageCheckResult{}

	vu, err := volumeutils.GetVolumeUsageByPath(ctx, dockerServerStoragePath)
	if err != nil {
		return nil, fmt.Errorf("error getting volume usage by path %q: %s", dockerServerStoragePath, err)
	}
	res.VolumeUsage = vu

	var images []types.ImageSummary

	// Current werf images: carry both the werf label and the stage digest label.
	{
		filterSet := filters.NewArgs()
		filterSet.Add("label", image.WerfLabel)
		filterSet.Add("label", image.WerfStageDigestLabel)
		imgs, err := docker.Images(ctx, types.ImageListOptions{Filters: filterSet})
		if err != nil {
			return nil, fmt.Errorf("unable to get werf docker images: %s", err)
		}
		images = append(images, imgs...)
	}

	// werf v1.1 legacy images (identified by the old stage-signature label),
	// excluding images that belong to a v1.1 local stages storage.
	{
		filterSet := filters.NewArgs()
		filterSet.Add("label", image.WerfLabel)
		filterSet.Add("label", "werf-stage-signature") // v1.1 legacy images
		imgs, err := docker.Images(ctx, types.ImageListOptions{Filters: filterSet})
		if err != nil {
			return nil, fmt.Errorf("unable to get werf v1.1 legacy docker images: %s", err)
		}

	ExcludeLocalV1_1StagesStorage:
		for _, img := range imgs {
			for _, ref := range img.RepoTags {
				// Do not remove stages-storage=:local images, because this is primary stages storage data, and it can only be cleaned by the werf-cleanup command
				if strings.HasPrefix(ref, "werf-stages-storage/") {
					continue ExcludeLocalV1_1StagesStorage
				}
			}
			images = append(images, img)
		}
	}

	// Zero-size werf service images (client-id, managed-images, metadata, …).
	// Only collected when werf v1.1 has never run on this host, because v1.1
	// still relies on these images.
	{
		// **NOTICE** Remove v1.1 last-run-at timestamp check when v1.1 reaches its end of life
		t, err := werf.GetWerfLastRunAtV1_1(ctx)
		if err != nil {
			return nil, fmt.Errorf("error getting v1.1 last run timestamp: %s", err)
		}

		// No werf v1.1 runs on this host.
		// This is stupid check, but the only available safe option at the moment.
		if t.IsZero() {
			filterSet := filters.NewArgs()
			filterSet.Add("reference", "*client-id-*")
			filterSet.Add("reference", "*managed-image-*")
			filterSet.Add("reference", "*meta-*")
			filterSet.Add("reference", "*import-metadata-*")
			filterSet.Add("reference", "*-rejected")
			filterSet.Add("reference", "werf-client-id/*")
			filterSet.Add("reference", "werf-managed-images/*")
			filterSet.Add("reference", "werf-images-metadata-by-commit/*")
			filterSet.Add("reference", "werf-import-metadata/*")
			imgs, err := docker.Images(ctx, types.ImageListOptions{Filters: filterSet})
			if err != nil {
				return nil, fmt.Errorf("unable to get werf service images: %s", err)
			}

			for _, img := range imgs {
				// **NOTICE.** Cannot remove by werf label, because currently there is no such label for service-images by historical reasons.
				// So check by size at least for now.
				if img.Size != 0 {
					continue
				}
				images = append(images, img)
			}
		}
	}

	// Build a descriptor per image with its best-known last-used timestamp:
	// the LRU-cache access time of the first tracked tag, falling back to the
	// image creation time.
CreateImagesDescs:
	for _, imageSummary := range images {
		data, _ := json.Marshal(imageSummary)
		logboek.Context(ctx).Debug().LogF("Image summary:\n%s\n---\n", data)

		res.TotalImagesBytes += uint64(imageSummary.VirtualSize - imageSummary.SharedSize)

		lastUsedAt := time.Unix(imageSummary.Created, 0)

	CheckEachRef:
		for _, ref := range imageSummary.RepoTags {
			// IMPORTANT: ignore none images, these may be either orphans or just built fresh images and we shall not delete these
			if ref == "<none>:<none>" {
				continue CreateImagesDescs
			}

			lastRecentlyUsedAt, err := lrumeta.CommonLRUImagesCache.GetImageLastAccessTime(ctx, ref)
			if err != nil {
				return nil, fmt.Errorf("error accessing last recently used images cache: %s", err)
			}

			if lastRecentlyUsedAt.IsZero() {
				continue CheckEachRef
			}

			lastUsedAt = lastRecentlyUsedAt
			break
		}

		desc := &LocalImageDesc{
			ImageSummary: imageSummary,
			LastUsedAt:   lastUsedAt,
		}
		res.ImagesDescs = append(res.ImagesDescs, desc)
	}

	// Least recently used images come first — they are deleted first.
	sort.Sort(ImagesLruSort(res.ImagesDescs))

	return res, nil
}
// RunGCForLocalDockerServer frees docker storage space when volume usage
// exceeds allowedVolumeUsagePercentage. It repeatedly deletes
// least-recently-used werf images, werf build containers and dangling werf
// images until usage drops below the target level
// (allowedVolumeUsagePercentage - allowedVolumeUsageMarginPercentage) or no
// more images can be freed. With dryRun only one pass is performed and
// nothing is actually removed.
func RunGCForLocalDockerServer(ctx context.Context, allowedVolumeUsagePercentage, allowedVolumeUsageMarginPercentage float64, dockerServerStoragePath string, force, dryRun bool) error {
	if dockerServerStoragePath == "" {
		return nil
	}

	// Clean down to allowed-usage minus margin so GC does not re-trigger
	// immediately after finishing.
	targetVolumeUsage := allowedVolumeUsagePercentage - allowedVolumeUsageMarginPercentage
	if targetVolumeUsage < 0 {
		targetVolumeUsage = 0
	}

	checkResult, err := GetLocalDockerServerStorageCheck(ctx, dockerServerStoragePath)
	if err != nil {
		return fmt.Errorf("error getting local docker server storage check: %s", err)
	}

	bytesToFree := checkResult.GetBytesToFree(targetVolumeUsage)

	// Usage is within the allowed level: log the OK state and stop.
	if checkResult.VolumeUsage.Percentage <= allowedVolumeUsagePercentage {
		logboek.Context(ctx).Default().LogBlock("Local docker server storage check").Do(func() {
			logboek.Context(ctx).Default().LogF("Docker server storage path: %s\n", dockerServerStoragePath)
			logboek.Context(ctx).Default().LogF("Volume usage: %s / %s\n", humanize.Bytes(checkResult.VolumeUsage.UsedBytes), humanize.Bytes(checkResult.VolumeUsage.TotalBytes))
			logboek.Context(ctx).Default().LogF("Allowed volume usage percentage: %s <= %s — %s\n", utils.GreenF("%0.2f%%", checkResult.VolumeUsage.Percentage), utils.BlueF("%0.2f%%", allowedVolumeUsagePercentage), utils.GreenF("OK"))
		})
		return nil
	}

	logboek.Context(ctx).Default().LogBlock("Local docker server storage check").Do(func() {
		logboek.Context(ctx).Default().LogF("Docker server storage path: %s\n", dockerServerStoragePath)
		logboek.Context(ctx).Default().LogF("Volume usage: %s / %s\n", humanize.Bytes(checkResult.VolumeUsage.UsedBytes), humanize.Bytes(checkResult.VolumeUsage.TotalBytes))
		logboek.Context(ctx).Default().LogF("Allowed percentage level exceeded: %s > %s — %s\n", utils.RedF("%0.2f%%", checkResult.VolumeUsage.Percentage), utils.YellowF("%0.2f%%", allowedVolumeUsagePercentage), utils.RedF("HIGH VOLUME USAGE"))
		logboek.Context(ctx).Default().LogF("Target percentage level after cleanup: %0.2f%% - %0.2f%% (margin) = %s\n", allowedVolumeUsagePercentage, allowedVolumeUsageMarginPercentage, utils.BlueF("%0.2f%%", targetVolumeUsage))
		logboek.Context(ctx).Default().LogF("Needed to free: %s\n", utils.RedF("%s", humanize.Bytes(bytesToFree)))
		logboek.Context(ctx).Default().LogF("Available images to free: %s\n", utils.YellowF("%d", len(checkResult.ImagesDescs)))
	})

	// IDs already handled in earlier loop iterations, so repeated passes do
	// not retry (and re-log) the same images/containers.
	var processedDockerImagesIDs []string
	var processedDockerContainersIDs []string

	for {
		// Per-iteration counters and the image locks acquired in this pass.
		var freedBytes uint64
		var freedImagesCount uint64
		var acquiredHostLocks []lockgate.LockHandle

		if len(checkResult.ImagesDescs) > 0 {
			if err := logboek.Context(ctx).Default().LogProcess("Running cleanup for least recently used docker images created by werf").DoError(func() error {
			DeleteImages:
				for _, desc := range checkResult.ImagesDescs {
					// Skip images already processed in a previous pass.
					for _, id := range processedDockerImagesIDs {
						if desc.ImageSummary.ID == id {
							logboek.Context(ctx).Default().LogFDetails("Skip already processed image %q\n", desc.ImageSummary.ID)
							continue DeleteImages
						}
					}
					processedDockerImagesIDs = append(processedDockerImagesIDs, desc.ImageSummary.ID)

					imageRemoved := false

					if len(desc.ImageSummary.RepoTags) > 0 {
						allTagsRemoved := true

						for _, ref := range desc.ImageSummary.RepoTags {
							if ref == "<none>:<none>" {
								// Untagged reference: remove by image ID.
								if err := removeImage(ctx, desc.ImageSummary.ID, force, dryRun); err != nil {
									logboek.Context(ctx).Warn().LogF("failed to remove local docker image by ID %q: %s\n", desc.ImageSummary.ID, err)
									allTagsRemoved = false
								}
							} else {
								// Take a non-blocking per-image host lock so an image
								// currently used by another werf process is skipped
								// rather than deleted from under it.
								lockName := container_runtime.ImageLockName(ref)
								isLocked, lock, err := werf.AcquireHostLock(ctx, lockName, lockgate.AcquireOptions{NonBlocking: true})
								if err != nil {
									return fmt.Errorf("error locking image %q: %s", lockName, err)
								}

								if !isLocked {
									logboek.Context(ctx).Default().LogFDetails("Image %q is locked at the moment: skip removal\n", ref)
									continue DeleteImages
								}
								// Held until the end of this pass; released below.
								acquiredHostLocks = append(acquiredHostLocks, lock)

								if err := removeImage(ctx, ref, force, dryRun); err != nil {
									logboek.Context(ctx).Warn().LogF("failed to remove local docker image by repo tag %q: %s\n", ref, err)
									allTagsRemoved = false
								}
							}
						}

						if allTagsRemoved {
							imageRemoved = true
						}
					} else if len(desc.ImageSummary.RepoDigests) > 0 {
						allDigestsRemoved := true

						for _, repoDigest := range desc.ImageSummary.RepoDigests {
							if err := removeImage(ctx, repoDigest, force, dryRun); err != nil {
								logboek.Context(ctx).Warn().LogF("failed to remove local docker image by repo digest %q: %s\n", repoDigest, err)
								allDigestsRemoved = false
							}
						}

						if allDigestsRemoved {
							imageRemoved = true
						}
					}

					if imageRemoved {
						// VirtualSize minus SharedSize approximates the bytes unique
						// to this image (matches TotalImagesBytes accounting).
						freedBytes += uint64(desc.ImageSummary.VirtualSize - desc.ImageSummary.SharedSize)
						freedImagesCount++
					}

					// Remove at least MinImagesToDelete images per pass before
					// checking whether enough bytes have been freed.
					if freedImagesCount < MinImagesToDelete {
						continue
					}
					if freedBytes > bytesToFree {
						break
					}
				}

				logboek.Context(ctx).Default().LogF("Freed images: %s\n", utils.GreenF("%d", freedImagesCount))
				return nil
			}); err != nil {
				return err
			}
		}

		if freedImagesCount == 0 {
			logboek.Context(ctx).Warn().LogF("WARNING: Detected high docker storage volume usage, while no werf images available to cleanup!\n")
			logboek.Context(ctx).Warn().LogF("WARNING:\n")
			logboek.Context(ctx).Warn().LogF("WARNING: Werf tries to maintain host clean by deleting:\n")
			logboek.Context(ctx).Warn().LogF("WARNING:  - old unused files from werf caches (which are stored in the ~/.werf/local_cache);\n")
			logboek.Context(ctx).Warn().LogF("WARNING:  - old temporary service files /tmp/werf-project-data-* and /tmp/werf-config-render-*;\n")
			logboek.Context(ctx).Warn().LogF("WARNING:  - least recently used werf images.\n")
			logboek.Context(ctx).Warn().LogF("WARNING:\n")
			logboek.Context(ctx).Warn().LogF("WARNING: Werf-host-cleanup procedure of v1.2 werf version will not cleanup --stages-storage=:local stages of v1.1 werf version, because this is primary stages storage data, and it can only be cleaned by the regular per-project werf-cleanup command with git-history based algorithm.\n")
			logboek.Context(ctx).Warn().LogOptionalLn()
		}

		// Release the image locks taken during this pass.
		for _, lock := range acquiredHostLocks {
			if err := werf.ReleaseHostLock(lock); err != nil {
				return fmt.Errorf("unable to release lock %q: %s", lock.LockName, err)
			}
		}

		commonOptions := CommonOptions{
			RmContainersThatUseWerfImages: force,
			SkipUsedImages:                !force,
			RmiForce:                      force,
			RmForce:                       true,
			DryRun:                        dryRun,
		}

		if err := logboek.Context(ctx).Default().LogProcess("Running cleanup for docker containers created by werf").DoError(func() error {
			newProcessedContainersIDs, err := safeContainersCleanup(ctx, processedDockerContainersIDs, commonOptions)
			if err != nil {
				return fmt.Errorf("safe containers cleanup failed: %s", err)
			}

			processedDockerContainersIDs = newProcessedContainersIDs
			return nil
		}); err != nil {
			return err
		}

		if err := logboek.Context(ctx).Default().LogProcess("Running cleanup for dangling docker images created by werf").DoError(func() error {
			return safeDanglingImagesCleanup(ctx, commonOptions)
		}); err != nil {
			return err
		}

		// Nothing was freed this pass: another pass could not free more, so
		// stop instead of looping forever.
		if freedImagesCount == 0 {
			break
		}
		// Dry-run never changes actual usage; a single pass is enough.
		if dryRun {
			break
		}

		logboek.Context(ctx).Default().LogOptionalLn()

		// Re-measure and either finish (target reached) or run another pass.
		checkResult, err = GetLocalDockerServerStorageCheck(ctx, dockerServerStoragePath)
		if err != nil {
			return fmt.Errorf("error getting local docker server storage check: %s", err)
		}

		if checkResult.VolumeUsage.Percentage <= targetVolumeUsage {
			logboek.Context(ctx).Default().LogBlock("Local docker server storage check").Do(func() {
				logboek.Context(ctx).Default().LogF("Docker server storage path: %s\n", dockerServerStoragePath)
				logboek.Context(ctx).Default().LogF("Volume usage: %s / %s\n", humanize.Bytes(checkResult.VolumeUsage.UsedBytes), humanize.Bytes(checkResult.VolumeUsage.TotalBytes))
				logboek.Context(ctx).Default().LogF("Target volume usage percentage: %s <= %s — %s\n", utils.GreenF("%0.2f%%", checkResult.VolumeUsage.Percentage), utils.BlueF("%0.2f%%", targetVolumeUsage), utils.GreenF("OK"))
			})
			break
		}

		bytesToFree = checkResult.GetBytesToFree(targetVolumeUsage)

		logboek.Context(ctx).Default().LogBlock("Local docker server storage check").Do(func() {
			logboek.Context(ctx).Default().LogF("Docker server storage path: %s\n", dockerServerStoragePath)
			logboek.Context(ctx).Default().LogF("Volume usage: %s / %s\n", humanize.Bytes(checkResult.VolumeUsage.UsedBytes), humanize.Bytes(checkResult.VolumeUsage.TotalBytes))
			logboek.Context(ctx).Default().LogF("Target volume usage percentage: %s > %s — %s\n", utils.RedF("%0.2f%%", checkResult.VolumeUsage.Percentage), utils.BlueF("%0.2f%%", targetVolumeUsage), utils.RedF("HIGH VOLUME USAGE"))
			logboek.Context(ctx).Default().LogF("Needed to free: %s\n", utils.RedF("%s", humanize.Bytes(bytesToFree)))
			logboek.Context(ctx).Default().LogF("Available images to free: %s\n", utils.YellowF("%d", len(checkResult.ImagesDescs)))
		})
	}

	return nil
}
// removeImage removes a single local docker image identified by ref (a repo
// tag, repo digest or image ID) via `docker rmi`, passing --force when force
// is set. In dry-run mode only the log line is printed and nothing is removed.
func removeImage(ctx context.Context, ref string, force, dryRun bool) error {
	logboek.Context(ctx).Default().LogF("Removing %s\n", ref)
	if dryRun {
		return nil
	}

	rmiArgs := []string{ref}
	if force {
		rmiArgs = append(rmiArgs, "--force")
	}

	return docker.CliRmi(ctx, rmiArgs...)
}
// LocalImageDesc pairs a docker image summary with the timestamp used for
// least-recently-used ordering: the LRU-cache access time when available,
// otherwise the image creation time (see GetLocalDockerServerStorageCheck).
type LocalImageDesc struct {
	ImageSummary types.ImageSummary
	LastUsedAt   time.Time
}
// ImagesLruSort implements sort.Interface over image descriptors, ordering by
// LastUsedAt ascending so the least recently used images come first.
type ImagesLruSort []*LocalImageDesc

func (a ImagesLruSort) Len() int      { return len(a) }
func (a ImagesLruSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

func (a ImagesLruSort) Less(i, j int) bool {
	// Equivalent to a[i].LastUsedAt.Before(a[j].LastUsedAt).
	return a[j].LastUsedAt.After(a[i].LastUsedAt)
}
// safeDanglingImagesCleanup removes truly dangling docker images, first
// filtering out images that are still in use according to options
// (via processUsedImages).
func safeDanglingImagesCleanup(ctx context.Context, options CommonOptions) error {
	images, err := trueDanglingImages(ctx)
	if err != nil {
		return err
	}

	// The slice from trueDanglingImages is not used after this point, so it
	// can be passed on directly — the previous element-by-element copy into a
	// second slice was redundant.
	imagesToRemove, err := processUsedImages(ctx, images, options)
	if err != nil {
		return err
	}

	return imagesRemove(ctx, imagesToRemove, options)
}
// safeContainersCleanup removes werf stage-build containers, skipping
// containers handled in earlier passes (processedDockerContainersIDs) and
// containers host-locked by another werf process. It returns the updated
// processed-IDs list so callers can carry it into the next GC pass.
func safeContainersCleanup(ctx context.Context, processedDockerContainersIDs []string, options CommonOptions) ([]string, error) {
	containers, err := werfContainersByFilterSet(ctx, filters.NewArgs())
	if err != nil {
		return nil, fmt.Errorf("cannot get stages build containers: %s", err)
	}

ProcessContainers:
	for _, container := range containers {
		// Skip containers already processed in a previous pass.
		for _, id := range processedDockerContainersIDs {
			if id == container.ID {
				continue ProcessContainers
			}
		}
		processedDockerContainersIDs = append(processedDockerContainersIDs, container.ID)

		// Docker prefixes container names with "/": pick the first name that
		// matches the werf stage-container naming scheme.
		var containerName string
		for _, name := range container.Names {
			if strings.HasPrefix(name, fmt.Sprintf("/%s", image.StageContainerNamePrefix)) {
				containerName = strings.TrimPrefix(name, "/")
				break
			}
		}

		if containerName == "" {
			logboek.Context(ctx).Warn().LogF("Ignore bad container %s\n", container.ID)
			continue
		}

		// Per-container closure so the deferred lock release fires at the end
		// of each iteration instead of at function return.
		if err := func() error {
			containerLockName := container_runtime.ContainerLockName(containerName)
			isLocked, lock, err := werf.AcquireHostLock(ctx, containerLockName, lockgate.AcquireOptions{NonBlocking: true})
			if err != nil {
				return fmt.Errorf("failed to lock %s for container %s: %s", containerLockName, logContainerName(container), err)
			}

			// Lock held by another process: the container is in use, skip it.
			if !isLocked {
				logboek.Context(ctx).Default().LogFDetails("Ignore container %s used by another process\n", logContainerName(container))
				return nil
			}
			// NOTE(review): release error is intentionally ignored here —
			// best-effort unlock on the per-iteration path.
			defer werf.ReleaseHostLock(lock)

			if err := containersRemove(ctx, []types.Container{container}, options); err != nil {
				return fmt.Errorf("failed to remove container %s: %s", logContainerName(container), err)
			}

			return nil
		}(); err != nil {
			return nil, err
		}
	}

	return processedDockerContainersIDs, nil
}