feat: sync-options annotation with Force=true (#414) (#560)
Signed-off-by: kkk777-7 <kota.kimura0725@gmail.com>
kkk777-7 committed Apr 16, 2024
1 parent 1ade3a1 commit fbecbb8
Showing 4 changed files with 118 additions and 54 deletions.
2 changes: 2 additions & 0 deletions pkg/sync/common/types.go
@@ -27,6 +27,8 @@ const (
SyncOptionPruneLast = "PruneLast=true"
// Sync option that enables use of replace or create command instead of apply
SyncOptionReplace = "Replace=true"
// Sync option that enables use of --force flag, delete and re-create
SyncOptionForce = "Force=true"
// Sync option that enables use of --server-side flag instead of client-side
SyncOptionServerSideApply = "ServerSideApply=true"
// Sync option that disables resource deletion
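The new constant is matched against the resource-level sync-options annotation (synccommon.AnnotationSyncOptions), the same mechanism the engine already uses for Replace=true. A rough sketch of how a target object opts in, modeled on the withForceAnnotation test helper added later in this commit (the module import path and the illustrative pod object are assumptions, not part of the change):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

    synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
)

func main() {
    pod := &unstructured.Unstructured{Object: map[string]interface{}{
        "apiVersion": "v1",
        "kind":       "Pod",
        "metadata":   map[string]interface{}{"name": "demo", "namespace": "default"},
    }}

    // Opt the object into delete-and-recreate semantics; options combine with
    // commas, e.g. "Force=true,Replace=true", as exercised by the new test below.
    pod.SetAnnotations(map[string]string{
        synccommon.AnnotationSyncOptions: synccommon.SyncOptionForce, // "Force=true"
    })

    fmt.Println(pod.GetAnnotations())
}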
39 changes: 20 additions & 19 deletions pkg/sync/sync_context.go
@@ -459,8 +459,8 @@ func (sc *syncContext) Sync() {
// if pruned tasks pending deletion, then wait...
prunedTasksPendingDelete := tasks.Filter(func(t *syncTask) bool {
if t.pruned() && t.liveObj != nil {
return t.liveObj.GetDeletionTimestamp() != nil
}
return false
})
if prunedTasksPendingDelete.Len() > 0 {
@@ -761,31 +761,31 @@ func (sc *syncContext) getSyncTasks() (_ syncTasks, successful bool) {
// for prune tasks, modify the waves for proper cleanup i.e reverse of sync wave (creation order)
pruneTasks := make(map[int][]*syncTask)
for _, task := range tasks {
if task.isPrune() {
pruneTasks[task.wave()] = append(pruneTasks[task.wave()], task)
}
}

var uniquePruneWaves []int
for k := range pruneTasks {
uniquePruneWaves = append(uniquePruneWaves, k)
}
sort.Ints(uniquePruneWaves)

// reorder waves for pruning tasks using symmetric swap on prune waves
n := len(uniquePruneWaves)
for i := 0; i < n/2; i++ {
// waves to swap
startWave := uniquePruneWaves[i]
endWave := uniquePruneWaves[n-1-i]

for _, task := range pruneTasks[startWave] {
task.waveOverride = &endWave
}

for _, task := range pruneTasks[endWave] {
task.waveOverride = &startWave
}
}

// for pruneLast tasks, modify the wave to sync phase last wave of tasks + 1
@@ -940,7 +940,7 @@ func (sc *syncContext) ensureCRDReady(name string) error {
})
}

-func (sc *syncContext) applyObject(t *syncTask, dryRun, force, validate bool) (common.ResultCode, string) {
+func (sc *syncContext) applyObject(t *syncTask, dryRun, validate bool) (common.ResultCode, string) {
dryRunStrategy := cmdutil.DryRunNone
if dryRun {
// irrespective of the dry run mode set in the sync context, always run
@@ -954,6 +954,7 @@ func (sc *syncContext) applyObject(t *syncTask, dryRun, force, validate bool) (common.ResultCode, string) {
var err error
var message string
shouldReplace := sc.replace || resourceutil.HasAnnotationOption(t.targetObj, common.AnnotationSyncOptions, common.SyncOptionReplace)
force := sc.force || resourceutil.HasAnnotationOption(t.targetObj, common.AnnotationSyncOptions, common.SyncOptionForce)
// if it is a dry run, disable server side apply, as the goal is to validate only the
// yaml correctness of the rendered manifests.
// running dry-run in server mode breaks the auto create namespace feature
@@ -1233,7 +1234,7 @@ func (sc *syncContext) processCreateTasks(state runState, tasks syncTasks, dryRu
logCtx := sc.log.WithValues("dryRun", dryRun, "task", t)
logCtx.V(1).Info("Applying")
validate := sc.validate && !resourceutil.HasAnnotationOption(t.targetObj, common.AnnotationSyncOptions, common.SyncOptionsDisableValidation)
-result, message := sc.applyObject(t, dryRun, sc.force, validate)
+result, message := sc.applyObject(t, dryRun, validate)
if result == common.ResultCodeSyncFailed {
logCtx.WithValues("message", message).Info("Apply failed")
state = failed
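Taken together, the sync_context.go changes mean applyObject no longer receives force from its caller: it derives the flag per object, from either the sync-level setting or the new annotation, mirroring the existing Replace=true handling, and forwards it to the apply or replace operation. A minimal standalone sketch of that decision follows; hasOption is a simplified stand-in for resourceutil.HasAnnotationOption (its exact behavior is assumed), and the printed operation names are only illustrative:

package main

import (
    "fmt"
    "strings"
)

// hasOption is a simplified stand-in for resourceutil.HasAnnotationOption:
// the sync-options annotation value is treated as a comma-separated option list.
func hasOption(annotationValue, option string) bool {
    for _, o := range strings.Split(annotationValue, ",") {
        if strings.TrimSpace(o) == option {
            return true
        }
    }
    return false
}

func main() {
    syncOptions := "Force=true"        // sync-options annotation value on the target object
    scForce, scReplace := false, false // sync-level settings held on the syncContext

    // Per-object decision, as applyObject now computes it:
    force := scForce || hasOption(syncOptions, "Force=true")
    shouldReplace := scReplace || hasOption(syncOptions, "Replace=true")

    if shouldReplace {
        fmt.Println("replace, force =", force) // delete and re-create when forced
    } else {
        fmt.Println("apply, force =", force) // force forwarded to ApplyResource
    }
}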
114 changes: 80 additions & 34 deletions pkg/sync/sync_context_test.go
@@ -840,6 +840,52 @@ func TestSync_ServerSideApply(t *testing.T) {
}
}

func withForceAnnotation(un *unstructured.Unstructured) *unstructured.Unstructured {
un.SetAnnotations(map[string]string{synccommon.AnnotationSyncOptions: synccommon.SyncOptionForce})
return un
}

func withForceAndReplaceAnnotations(un *unstructured.Unstructured) *unstructured.Unstructured {
un.SetAnnotations(map[string]string{synccommon.AnnotationSyncOptions: "Force=true,Replace=true"})
return un
}

func TestSync_Force(t *testing.T) {
testCases := []struct {
name string
target *unstructured.Unstructured
live *unstructured.Unstructured
commandUsed string
force bool
}{
{"NoAnnotation", NewPod(), NewPod(), "apply", false},
{"ForceApplyAnnotationIsSet", withForceAnnotation(NewPod()), NewPod(), "apply", true},
{"ForceReplaceAnnotationIsSet", withForceAndReplaceAnnotations(NewPod()), NewPod(), "replace", true},
{"LiveObjectMissing", withReplaceAnnotation(NewPod()), nil, "create", false},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
syncCtx := newTestSyncCtx(nil)

tc.target.SetNamespace(FakeArgoCDNamespace)
if tc.live != nil {
tc.live.SetNamespace(FakeArgoCDNamespace)
}
syncCtx.resources = groupResources(ReconciliationResult{
Live: []*unstructured.Unstructured{tc.live},
Target: []*unstructured.Unstructured{tc.target},
})

syncCtx.Sync()

resourceOps, _ := syncCtx.resourceOps.(*kubetest.MockResourceOps)
assert.Equal(t, tc.commandUsed, resourceOps.GetLastResourceCommand(kube.GetResourceKey(tc.target)))
assert.Equal(t, tc.force, resourceOps.GetLastForce())
})
}
}

func TestSelectiveSyncOnly(t *testing.T) {
pod1 := NewPod()
pod1.SetName("pod-1")
@@ -1771,11 +1817,11 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
// no change in wave order
expectedWaveOrder: map[string]int{
// new wave // original wave
ns.GetName(): 0, // 0
pod1.GetName(): 1, // 1
pod2.GetName(): 2, // 2
pod3.GetName(): 3, // 3
pod4.GetName(): 4, // 4
},
},
{
@@ -1785,11 +1831,11 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
// change in prune wave order
expectedWaveOrder: map[string]int{
// new wave // original wave
ns.GetName(): 4, // 0
pod1.GetName(): 3, // 1
pod2.GetName(): 2, // 2
pod3.GetName(): 1, // 3
pod4.GetName(): 0, // 4
},
},
{
@@ -1799,13 +1845,13 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
// change in prune wave order
expectedWaveOrder: map[string]int{
// new wave // original wave
pod1.GetName(): 4, // 1
pod3.GetName(): 3, // 3
pod4.GetName(): 1, // 4

// no change since non prune tasks
ns.GetName(): 0, // 0
pod2.GetName(): 2, // 2
},
},
}
@@ -1830,13 +1876,13 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
// change in prune wave order
expectedWaveOrder: map[string]int{
// new wave // original wave
pod1.GetName(): 5, // 1
pod2.GetName(): 5, // 2
pod3.GetName(): 5, // 3
pod4.GetName(): 5, // 4

// no change since non prune tasks
ns.GetName(): 0, // 0
},
},
{
@@ -1847,13 +1893,13 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
// change in wave order
expectedWaveOrder: map[string]int{
// new wave // original wave
pod1.GetName(): 4, // 1
pod2.GetName(): 5, // 2
pod3.GetName(): 2, // 3
pod4.GetName(): 1, // 4

// no change since non prune tasks
ns.GetName(): 0, // 0
},
},
}
@@ -1877,11 +1923,11 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
// change in prune wave order
expectedWaveOrder: map[string]int{
// new wave // original wave
pod1.GetName(): 5, // 1
pod3.GetName(): 4, // 3
pod4.GetName(): 4, // 3
pod5.GetName(): 3, // 4
pod7.GetName(): 1, // 5

// no change since non prune tasks
ns.GetName(): -1, // -1
@@ -1941,8 +1987,8 @@ func TestWaitForCleanUpBeforeNextWave(t *testing.T) {

// simulate successful delete of pod3
syncCtx.resources = groupResources(ReconciliationResult{
-Target: []*unstructured.Unstructured{nil, nil, },
-Live: []*unstructured.Unstructured{pod1, pod2, },
+Target: []*unstructured.Unstructured{nil, nil},
+Live: []*unstructured.Unstructured{pod1, pod2},
})

// next sync should prune only pod2
@@ -1966,8 +2012,8 @@ func TestWaitForCleanUpBeforeNextWave(t *testing.T) {

// simulate successful delete of pod2
syncCtx.resources = groupResources(ReconciliationResult{
-Target: []*unstructured.Unstructured{nil, },
-Live: []*unstructured.Unstructured{pod1, },
+Target: []*unstructured.Unstructured{nil},
+Live: []*unstructured.Unstructured{pod1},
})

// next sync should proceed with next wave
17 changes: 16 additions & 1 deletion pkg/utils/kube/kubetest/mock_resource_operations.go
@@ -22,6 +22,7 @@ type MockResourceOps struct {
lastValidate bool
serverSideApply bool
serverSideApplyManager string
lastForce bool

recordLock sync.RWMutex

@@ -73,6 +74,19 @@ func (r *MockResourceOps) SetLastServerSideApplyManager(manager string) {
r.recordLock.Unlock()
}

func (r *MockResourceOps) SetLastForce(force bool) {
r.recordLock.Lock()
r.lastForce = force
r.recordLock.Unlock()
}

func (r *MockResourceOps) GetLastForce() bool {
r.recordLock.RLock()
force := r.lastForce
r.recordLock.RUnlock()
return force
}

func (r *MockResourceOps) SetLastResourceCommand(key kube.ResourceKey, cmd string) {
r.recordLock.Lock()
if r.lastCommandPerResource == nil {
Expand All @@ -95,6 +109,7 @@ func (r *MockResourceOps) ApplyResource(ctx context.Context, obj *unstructured.U
r.SetLastValidate(validate)
r.SetLastServerSideApply(serverSideApply)
r.SetLastServerSideApplyManager(manager)
r.SetLastForce(force)
r.SetLastResourceCommand(kube.GetResourceKey(obj), "apply")
command, ok := r.Commands[obj.GetName()]
if !ok {
@@ -105,9 +120,9 @@ func (r *MockResourceOps) ApplyResource(ctx context.Context, obj *unstructured.U
}

func (r *MockResourceOps) ReplaceResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force bool) (string, error) {
r.SetLastForce(force)
command, ok := r.Commands[obj.GetName()]
r.SetLastResourceCommand(kube.GetResourceKey(obj), "replace")

if !ok {
return "", nil
}
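With both ApplyResource and ReplaceResource recording the flag, a test can drive a sync and then read back what the engine requested, which is what TestSync_Force above asserts via GetLastResourceCommand and GetLastForce. A trivial standalone sketch of the new accessors (the import path is assumed from this repository's layout):

package main

import (
    "fmt"

    "github.com/argoproj/gitops-engine/pkg/utils/kube/kubetest"
)

func main() {
    ops := &kubetest.MockResourceOps{}

    // The engine's apply/replace paths call SetLastForce internally;
    // assertions read the recorded value back with GetLastForce.
    ops.SetLastForce(true)
    fmt.Println("last force:", ops.GetLastForce()) // last force: true
}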
