fixup for UT test_ha_vm_failover.ml
Update test cases for the changed behavior: spread-evenly planning now stops
when one anti-affinity VM fails to be planned.

Signed-off-by: Gang Ji <gang.ji@citrix.com>
Gang Ji committed May 8, 2024
1 parent 8362ae5 commit 4496714
Showing 1 changed file with 20 additions and 27 deletions.
ocaml/tests/test_ha_vm_failover.ml: 47 changes (20 additions, 27 deletions)
@@ -727,13 +727,16 @@ let extract_output_for_anti_affinity_plan __context pool anti_affinity_plan =
Xapi_ha_vm_failover.anti_affinity_grp_ranked_hosts ~__context
(hosts_increasing |> List.map fst)
in
- anti_affinity_plan ~__context hosts_increasing slave1_vms_increasing
- vm_can_boot_on_host anti_affinity_grp_ranked_hosts_list []
- |> List.map (fun (vm, host) ->
- ( Db.VM.get_name_label ~__context ~self:vm
- , Db.Host.get_name_label ~__context ~self:host
- )
- )
+ try
+ anti_affinity_plan ~__context hosts_increasing slave1_vms_increasing
+ vm_can_boot_on_host anti_affinity_grp_ranked_hosts_list []
+ |> List.map (fun (vm, host) ->
+ ( Db.VM.get_name_label ~__context ~self:vm
+ , Db.Host.get_name_label ~__context ~self:host
+ )
+ )
+ with Api_errors.Server_error (no_hosts_available, []) as e ->
+ [("Anti-affinity VMs plan failed", Printexc.to_string e)]

let anti_affinity_plan_test_cases =
[
@@ -947,7 +950,7 @@ let anti_affinity_plan_test_cases =
, (* Assert that no_breach_plan returns as expected *)
[(vm2_grp1, master); (vm1_grp1, slave2)]
)
- ; (* Test 7: Two anti-affinity VMs belong to one group in slave1 to be evacuated, only 1 got planed *)
+ ; (* Test 7: Two anti-affinity VMs belong to one group in slave1 to be evacuated, only 1 can be planed *)
( {
basic_pool with
master= {memory_total= gib 512L; name_label= master; vms= []}
@@ -976,11 +979,15 @@ let anti_affinity_plan_test_cases =
]
}
, (* Assert that spread_evenly_plan returns as expected *)
- [(vm1_grp1, slave2)]
+ [
+ ( "Anti-affinity VMs plan failed"
+ , "Server_error(NO_HOSTS_AVAILABLE, [ ])"
+ )
+ ]
, (* Assert that no_breach_plan returns as expected *)
[(vm1_grp1, slave2)]
)
- ; (* Test 8: 6 anti-affinity VMs belong to one group in slave1 to be evacuated *)
+ ; (* Test 8: 6 anti-affinity VMs belong to one group in slave1 to be evacuated, only 5 can be planned *)
( {
basic_pool with
master= {memory_total= gib 640L; name_label= master; vms= []}
@@ -1034,11 +1041,9 @@ let anti_affinity_plan_test_cases =
}
, (* Assert that spread_evenly_plan returns as expected *)
[
- (vm5_grp1, master)
- ; (vm4_grp1, master)
- ; (vm3_grp1, slave2)
- ; (vm2_grp1, master)
- ; (vm1_grp1, slave2)
+ ( "Anti-affinity VMs plan failed"
+ , "Server_error(NO_HOSTS_AVAILABLE, [ ])"
+ )
]
, (* Assert that no_breach_plan returns as expected *)
[(vm2_grp1, master); (vm1_grp1, slave2)]
@@ -1066,12 +1071,6 @@
; vm_name_label= vm5_grp2
; groups= [{name_label= grp2; placement= AntiAffinity}]
}
- ; {
- basic_vm with
- memory= gib 513L
- ; vm_name_label= vm8_grp1
- ; groups= [{name_label= grp1; placement= AntiAffinity}]
- }
; {
basic_vm with
memory= gib 130L
@@ -1148,12 +1147,6 @@
; vm_name_label= vm5_grp2
; groups= [{name_label= grp2; placement= AntiAffinity}]
}
- ; {
- basic_vm with
- memory= gib 490L
- ; vm_name_label= vm8_grp1
- ; groups= [{name_label= grp1; placement= AntiAffinity}]
- }
; {
basic_vm with
memory= gib 130L
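Editor's note on the pattern in the first hunk: the test helper now wraps the planner call in try ... with, so a planning failure is returned as the pair ("Anti-affinity VMs plan failed", ...) instead of letting the exception escape the test, and the expected outputs in Tests 7 and 8 assert against that pair. Below is a minimal, self-contained sketch of the same pattern, assuming stand-in names: Server_error, no_hosts_available, plan_vms and extract_output are invented for illustration and are not xapi's real API (the real helper catches Api_errors.Server_error and renders it with Printexc.to_string, which the expected outputs above show as "Server_error(NO_HOSTS_AVAILABLE, [ ])").

(* Sketch only: convert a planning failure into a (label, detail) pair so that
   successful plans and failures can be compared with one list equality. *)

exception Server_error of string * string list

let no_hosts_available = "NO_HOSTS_AVAILABLE"

(* Toy planner: pair each VM with a distinct host, fail when hosts run out. *)
let plan_vms vms hosts =
  let rec go vms hosts =
    match (vms, hosts) with
    | [], _ ->
        []
    | _ :: _, [] ->
        raise (Server_error (no_hosts_available, []))
    | vm :: rest_vms, host :: rest_hosts ->
        (vm, host) :: go rest_vms rest_hosts
  in
  go vms hosts

(* The diff's pattern: catch the planning error and return a comparable value.
   The string is formatted by hand here to mimic the shape used in the tests. *)
let extract_output vms hosts =
  try plan_vms vms hosts
  with Server_error (code, []) when code = no_hosts_available ->
    [("Anti-affinity VMs plan failed", Printf.sprintf "Server_error(%s, [ ])" code)]

let () =
  (* Enough hosts: a normal (vm, host) plan is returned. *)
  assert (extract_output ["vm1"] ["host1"; "host2"] = [("vm1", "host1")]) ;
  (* Too few hosts: the failure becomes data the test can assert on. *)
  assert (
    extract_output ["vm1"; "vm2"; "vm3"] ["host1"]
    = [("Anti-affinity VMs plan failed", "Server_error(NO_HOSTS_AVAILABLE, [ ])")]
  )

The updated test cases in this commit make the same kind of assertion: a case's expected spread-evenly result is either a list of (vm, host) pairs or the single failure pair shown above.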
