diff --git a/bigtable/admin.go b/bigtable/admin.go index d7c5655067f..6127c899906 100644 --- a/bigtable/admin.go +++ b/bigtable/admin.go @@ -808,6 +808,10 @@ type InstanceConf struct { StorageType StorageType InstanceType InstanceType Labels map[string]string + + // AutoscalingConfig configures the autoscaling properties on the cluster + // created with the instance. It is optional. + AutoscalingConfig *AutoscalingConfig } // InstanceWithClustersConfig contains the information necessary to create an Instance @@ -824,22 +828,23 @@ var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][ // This method will return when the instance has been created or when an error occurs. func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *InstanceConf) error { ctx = mergeOutgoingMetadata(ctx, iac.md) - newConfig := InstanceWithClustersConfig{ + newConfig := &InstanceWithClustersConfig{ InstanceID: conf.InstanceId, DisplayName: conf.DisplayName, InstanceType: conf.InstanceType, Labels: conf.Labels, Clusters: []ClusterConfig{ { - InstanceID: conf.InstanceId, - ClusterID: conf.ClusterId, - Zone: conf.Zone, - NumNodes: conf.NumNodes, - StorageType: conf.StorageType, + InstanceID: conf.InstanceId, + ClusterID: conf.ClusterId, + Zone: conf.Zone, + NumNodes: conf.NumNodes, + StorageType: conf.StorageType, + AutoscalingConfig: conf.AutoscalingConfig, }, }, } - return iac.CreateInstanceWithClusters(ctx, &newConfig) + return iac.CreateInstanceWithClusters(ctx, newConfig) } // CreateInstanceWithClusters creates a new instance with configured clusters in the project. 
@@ -920,7 +925,9 @@ func (iac *InstanceAdminClient) updateInstance(ctx context.Context, conf *Instan // - InstanceID is required // - DisplayName and InstanceType are updated only if they are not empty // - ClusterID is required for any provided cluster -// - All other cluster fields are ignored except for NumNodes, which if set will be updated +// - All other cluster fields are ignored except for NumNodes and +// AutoscalingConfig, which if set will be updated. If both are provided, +// AutoscalingConfig takes precedence. // // This method may return an error after partially succeeding, for example if the instance is updated // but a cluster update fails. If an error is returned, InstanceInfo and Clusters may be called to @@ -941,12 +948,17 @@ func (iac *InstanceAdminClient) UpdateInstanceWithClusters(ctx context.Context, // Update any clusters for _, cluster := range conf.Clusters { - err := iac.UpdateCluster(ctx, conf.InstanceID, cluster.ClusterID, cluster.NumNodes) - if err != nil { + var clusterErr error + if cluster.AutoscalingConfig != nil { + clusterErr = iac.SetAutoscaling(ctx, conf.InstanceID, cluster.ClusterID, *cluster.AutoscalingConfig) + } else if cluster.NumNodes > 0 { + clusterErr = iac.UpdateCluster(ctx, conf.InstanceID, cluster.ClusterID, cluster.NumNodes) + } + if clusterErr != nil { if updatedInstance { // We updated the instance, so note that in the error message. return fmt.Errorf("UpdateCluster %q failed %v; however UpdateInstance succeeded", - cluster.ClusterID, err) + cluster.ClusterID, clusterErr) } - return err + return clusterErr } @@ -1033,6 +1045,35 @@ func (iac *InstanceAdminClient) InstanceInfo(ctx context.Context, instanceID str }, nil } +// AutoscalingConfig contains autoscaling configuration for a cluster. +// For details, see https://cloud.google.com/bigtable/docs/autoscaling. +type AutoscalingConfig struct { + // MinNodes sets the minimum number of nodes in a cluster. MinNodes must + // be 1 or greater. 
+ MinNodes int + // MaxNodes sets the maximum number of nodes in a cluster. MaxNodes must be + // equal to or greater than MinNodes. + MaxNodes int + // CPUTargetPercent sets the CPU utilization target for your cluster's + // workload. + CPUTargetPercent int +} + +func (a *AutoscalingConfig) proto() *btapb.Cluster_ClusterAutoscalingConfig { + if a == nil { + return nil + } + return &btapb.Cluster_ClusterAutoscalingConfig{ + AutoscalingLimits: &btapb.AutoscalingLimits{ + MinServeNodes: int32(a.MinNodes), + MaxServeNodes: int32(a.MaxNodes), + }, + AutoscalingTargets: &btapb.AutoscalingTargets{ + CpuUtilizationPercent: int32(a.CPUTargetPercent), + }, + } +} + // ClusterConfig contains the information necessary to create a cluster type ClusterConfig struct { // InstanceID specifies the unique name of the instance. Required. @@ -1047,7 +1088,9 @@ type ClusterConfig struct { Zone string // NumNodes specifies the number of nodes allocated to this cluster. More - // nodes enable higher throughput and more consistent performance. Required. + // nodes enable higher throughput and more consistent performance. One of + // NumNodes or AutoscalingConfig is required. If both are set, + // AutoscalingConfig takes precedence. NumNodes int32 // StorageType specifies the type of storage used by this cluster to serve @@ -1068,17 +1111,30 @@ type ClusterConfig struct { // key. // Optional. Immutable. KMSKeyName string + + // AutoscalingConfig configures the autoscaling properties on a cluster. + // One of NumNodes or AutoscalingConfig is required. 
+ AutoscalingConfig *AutoscalingConfig } func (cc *ClusterConfig) proto(project string) *btapb.Cluster { - ec := btapb.Cluster_EncryptionConfig{} - ec.KmsKeyName = cc.KMSKeyName - return &btapb.Cluster{ + cl := &btapb.Cluster{ ServeNodes: cc.NumNodes, DefaultStorageType: cc.StorageType.proto(), Location: "projects/" + project + "/locations/" + cc.Zone, - EncryptionConfig: &ec, + EncryptionConfig: &btapb.Cluster_EncryptionConfig{ + KmsKeyName: cc.KMSKeyName, + }, } + + if asc := cc.AutoscalingConfig; asc != nil { + cl.Config = &btapb.Cluster_ClusterConfig_{ + ClusterConfig: &btapb.Cluster_ClusterConfig{ + ClusterAutoscalingConfig: asc.proto(), + }, + } + } + return cl } // ClusterInfo represents information about a cluster. @@ -1100,6 +1156,9 @@ type ClusterInfo struct { // KMSKeyName is the customer managed encryption key for the cluster. KMSKeyName string + + // AutoscalingConfig are the configured values for a cluster. + AutoscalingConfig *AutoscalingConfig } // CreateCluster creates a new cluster in an instance. @@ -1129,13 +1188,48 @@ func (iac *InstanceAdminClient) DeleteCluster(ctx context.Context, instanceID, c return err } -// UpdateCluster updates attributes of a cluster +// SetAutoscaling enables autoscaling on a cluster. To remove autoscaling, use +// UpdateCluster. See AutoscalingConfig documentation for details. 
+func (iac *InstanceAdminClient) SetAutoscaling(ctx context.Context, instanceID, clusterID string, conf AutoscalingConfig) error { + ctx = mergeOutgoingMetadata(ctx, iac.md) + cluster := &btapb.Cluster{ + Name: "projects/" + iac.project + "/instances/" + instanceID + "/clusters/" + clusterID, + Config: &btapb.Cluster_ClusterConfig_{ + ClusterConfig: &btapb.Cluster_ClusterConfig{ + ClusterAutoscalingConfig: conf.proto(), + }, + }, + } + lro, err := iac.iClient.PartialUpdateCluster(ctx, &btapb.PartialUpdateClusterRequest{ + UpdateMask: &field_mask.FieldMask{ + Paths: []string{"cluster_config.cluster_autoscaling_config"}, + }, + Cluster: cluster, + }) + if err != nil { + return err + } + return longrunning.InternalNewOperation(iac.lroClient, lro).Wait(ctx, nil) +} + +// UpdateCluster updates attributes of a cluster. If Autoscaling is configured +// for the cluster, it will be removed and replaced by the static number of +// serve nodes specified. func (iac *InstanceAdminClient) UpdateCluster(ctx context.Context, instanceID, clusterID string, serveNodes int32) error { ctx = mergeOutgoingMetadata(ctx, iac.md) cluster := &btapb.Cluster{ Name: "projects/" + iac.project + "/instances/" + instanceID + "/clusters/" + clusterID, - ServeNodes: serveNodes} - lro, err := iac.iClient.UpdateCluster(ctx, cluster) + ServeNodes: serveNodes, + // Explicitly removing autoscaling config (and including it in the field + // mask below) + Config: nil, + } + lro, err := iac.iClient.PartialUpdateCluster(ctx, &btapb.PartialUpdateClusterRequest{ + UpdateMask: &field_mask.FieldMask{ + Paths: []string{"serve_nodes", "cluster_config.cluster_autoscaling_config"}, + }, + Cluster: cluster, + }) if err != nil { return err } @@ -1167,14 +1261,20 @@ func (iac *InstanceAdminClient) Clusters(ctx context.Context, instanceID string) if c.EncryptionConfig != nil { kmsKeyName = c.EncryptionConfig.KmsKeyName } - cis = append(cis, &ClusterInfo{ + ci := &ClusterInfo{ Name: nameParts[len(nameParts)-1], Zone: 
locParts[len(locParts)-1], ServeNodes: int(c.ServeNodes), State: c.State.String(), StorageType: storageTypeFromProto(c.DefaultStorageType), KMSKeyName: kmsKeyName, - }) + } + if cfg := c.GetClusterConfig(); cfg != nil { + if asc := fromClusterConfigProto(cfg); asc != nil { + ci.AutoscalingConfig = asc + } + } + cis = append(cis, ci) } if len(res.FailedLocations) > 0 { // Return partial results and an error in @@ -1206,7 +1306,7 @@ func (iac *InstanceAdminClient) GetCluster(ctx context.Context, instanceID, clus } nameParts := strings.Split(c.Name, "/") locParts := strings.Split(c.Location, "/") - cis := &ClusterInfo{ + ci := &ClusterInfo{ Name: nameParts[len(nameParts)-1], Zone: locParts[len(locParts)-1], ServeNodes: int(c.ServeNodes), @@ -1214,7 +1314,31 @@ func (iac *InstanceAdminClient) GetCluster(ctx context.Context, instanceID, clus StorageType: storageTypeFromProto(c.DefaultStorageType), KMSKeyName: kmsKeyName, } - return cis, nil + // Use type assertion to handle protobuf oneof type + if cfg := c.GetClusterConfig(); cfg != nil { + if asc := fromClusterConfigProto(cfg); asc != nil { + ci.AutoscalingConfig = asc + } + } + return ci, nil +} + +func fromClusterConfigProto(c *btapb.Cluster_ClusterConfig) *AutoscalingConfig { + if c == nil { + return nil + } + if c.ClusterAutoscalingConfig == nil { + return nil + } + got := c.ClusterAutoscalingConfig + if got.AutoscalingLimits == nil || got.AutoscalingTargets == nil { + return nil + } + return &AutoscalingConfig{ + MinNodes: int(got.AutoscalingLimits.MinServeNodes), + MaxNodes: int(got.AutoscalingLimits.MaxServeNodes), + CPUTargetPercent: int(got.AutoscalingTargets.CpuUtilizationPercent), + } } // InstanceIAM returns the instance's IAM handle. @@ -1478,8 +1602,12 @@ func max(x, y int) int { // and the given ClusterConfig. // - Any cluster missing from conf.Clusters but present in the instance will be removed from the instance // using DeleteCluster. 
-// - Any cluster in conf.Clusters that also exists in the instance will be updated to contain the -// provided number of nodes if set. +// - Any cluster in conf.Clusters that also exists in the instance will be +// updated either to contain the provided number of nodes or to use the +// provided autoscaling config. If both the number of nodes and autoscaling +// are configured, autoscaling takes precedence. If the number of nodes is zero +// and autoscaling is not provided in InstanceWithClustersConfig, the cluster +// is not updated. // // This method may return an error after partially succeeding, for example if the instance is updated // but a cluster update fails. If an error is returned, InstanceInfo and Clusters may be called to @@ -1521,16 +1649,25 @@ func UpdateInstanceAndSyncClusters(ctx context.Context, iac *InstanceAdminClient } delete(existingClusterNames, cluster.ClusterID) - if cluster.NumNodes <= 0 { - // We only synchronize clusters with a valid number of nodes. + if cluster.NumNodes <= 0 && cluster.AutoscalingConfig == nil { + // We only synchronize clusters with a valid number of nodes + // or a valid autoscaling config. continue } - // We simply want to update this cluster - err = iac.UpdateCluster(ctx, conf.InstanceID, cluster.ClusterID, cluster.NumNodes) - if err != nil { + // We update the cluster's autoscaling config, or its number of serve + // nodes. 
+ var updateErr error + if cluster.AutoscalingConfig != nil { + updateErr = iac.SetAutoscaling(ctx, conf.InstanceID, cluster.ClusterID, + *cluster.AutoscalingConfig) + } else { + updateErr = iac.UpdateCluster(ctx, conf.InstanceID, cluster.ClusterID, + cluster.NumNodes) + } + if updateErr != nil { return results, fmt.Errorf("UpdateCluster %q failed %v; Progress: %v", - cluster.ClusterID, err, results) + cluster.ClusterID, updateErr, results) } results.UpdatedClusters = append(results.UpdatedClusters, cluster.ClusterID) } diff --git a/bigtable/admin_test.go b/bigtable/admin_test.go new file mode 100644 index 00000000000..602cef14105 --- /dev/null +++ b/bigtable/admin_test.go @@ -0,0 +1,585 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigtable + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" + "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/anypb" +) + +type mockAdminClock struct { + btapb.BigtableInstanceAdminClient + + createInstanceReq *btapb.CreateInstanceRequest + createClusterReq *btapb.CreateClusterRequest + partialUpdateClusterReq *btapb.PartialUpdateClusterRequest + getClusterResp *btapb.Cluster +} + +func (c *mockAdminClock) PartialUpdateCluster( + ctx context.Context, in *btapb.PartialUpdateClusterRequest, opts ...grpc.CallOption, +) (*longrunning.Operation, error) { + c.partialUpdateClusterReq = in + return &longrunning.Operation{ + Done: true, + Result: &longrunning.Operation_Response{}, + }, nil +} + +func (c *mockAdminClock) CreateInstance( + ctx context.Context, in *btapb.CreateInstanceRequest, opts ...grpc.CallOption, +) (*longrunning.Operation, error) { + c.createInstanceReq = in + return &longrunning.Operation{ + Done: true, + Result: &longrunning.Operation_Response{ + Response: &anypb.Any{TypeUrl: "google.bigtable.admin.v2.Instance"}, + }, + }, nil +} + +func (c *mockAdminClock) CreateCluster( + ctx context.Context, in *btapb.CreateClusterRequest, opts ...grpc.CallOption, +) (*longrunning.Operation, error) { + c.createClusterReq = in + return &longrunning.Operation{ + Done: true, + Result: &longrunning.Operation_Response{ + Response: &anypb.Any{TypeUrl: "google.bigtable.admin.v2.Cluster"}, + }, + }, nil +} +func (c *mockAdminClock) PartialUpdateInstance( + ctx context.Context, in *btapb.PartialUpdateInstanceRequest, opts ...grpc.CallOption, +) (*longrunning.Operation, error) { + return &longrunning.Operation{ + Done: true, + Result: &longrunning.Operation_Response{}, + }, nil +} + +func (c *mockAdminClock) GetCluster( + ctx context.Context, in *btapb.GetClusterRequest, opts ...grpc.CallOption, +) 
(*btapb.Cluster, error) { + return c.getClusterResp, nil +} + +func (c *mockAdminClock) ListClusters( + ctx context.Context, in *btapb.ListClustersRequest, opts ...grpc.CallOption, +) (*btapb.ListClustersResponse, error) { + return &btapb.ListClustersResponse{Clusters: []*btapb.Cluster{c.getClusterResp}}, nil +} + +func setupClient(t *testing.T, ac btapb.BigtableInstanceAdminClient) *InstanceAdminClient { + ctx := context.Background() + c, err := NewInstanceAdminClient(ctx, "my-cool-project") + if err != nil { + t.Fatalf("NewInstanceAdminClient failed: %v", err) + } + c.iClient = ac + return c +} + +func TestInstanceAdmin_GetCluster(t *testing.T) { + tcs := []struct { + cluster *btapb.Cluster + wantConfig *AutoscalingConfig + desc string + }{ + { + desc: "when autoscaling is not enabled", + cluster: &btapb.Cluster{ + Name: ".../mycluster", + Location: ".../us-central1-a", + State: btapb.Cluster_READY, + DefaultStorageType: btapb.StorageType_SSD, + }, + wantConfig: nil, + }, + { + desc: "when autoscaling is enabled", + cluster: &btapb.Cluster{ + Name: ".../mycluster", + Location: ".../us-central1-a", + State: btapb.Cluster_READY, + DefaultStorageType: btapb.StorageType_SSD, + Config: &btapb.Cluster_ClusterConfig_{ + ClusterConfig: &btapb.Cluster_ClusterConfig{ + ClusterAutoscalingConfig: &btapb.Cluster_ClusterAutoscalingConfig{ + AutoscalingLimits: &btapb.AutoscalingLimits{ + MinServeNodes: 1, + MaxServeNodes: 2, + }, + AutoscalingTargets: &btapb.AutoscalingTargets{ + CpuUtilizationPercent: 10, + }, + }, + }, + }, + }, + wantConfig: &AutoscalingConfig{MinNodes: 1, MaxNodes: 2, CPUTargetPercent: 10}, + }, + } + + for _, tc := range tcs { + t.Run(tc.desc, func(t *testing.T) { + c := setupClient(t, &mockAdminClock{getClusterResp: tc.cluster}) + + info, err := c.GetCluster(context.Background(), "myinst", "mycluster") + if err != nil { + t.Fatalf("GetCluster failed: %v", err) + } + + if gotConfig := info.AutoscalingConfig; !cmp.Equal(gotConfig, tc.wantConfig) { + 
t.Fatalf("want autoscaling config = %v, got = %v", tc.wantConfig, gotConfig) + } + }) + } +} + +func TestInstanceAdmin_Clusters(t *testing.T) { + tcs := []struct { + cluster *btapb.Cluster + wantConfig *AutoscalingConfig + desc string + }{ + { + desc: "when autoscaling is not enabled", + cluster: &btapb.Cluster{ + Name: ".../mycluster", + Location: ".../us-central1-a", + State: btapb.Cluster_READY, + DefaultStorageType: btapb.StorageType_SSD, + }, + wantConfig: nil, + }, + { + desc: "when autoscaling is enabled", + cluster: &btapb.Cluster{ + Name: ".../mycluster", + Location: ".../us-central1-a", + State: btapb.Cluster_READY, + DefaultStorageType: btapb.StorageType_SSD, + Config: &btapb.Cluster_ClusterConfig_{ + ClusterConfig: &btapb.Cluster_ClusterConfig{ + ClusterAutoscalingConfig: &btapb.Cluster_ClusterAutoscalingConfig{ + AutoscalingLimits: &btapb.AutoscalingLimits{ + MinServeNodes: 1, + MaxServeNodes: 2, + }, + AutoscalingTargets: &btapb.AutoscalingTargets{ + CpuUtilizationPercent: 10, + }, + }, + }, + }, + }, + wantConfig: &AutoscalingConfig{MinNodes: 1, MaxNodes: 2, CPUTargetPercent: 10}, + }, + } + + for _, tc := range tcs { + t.Run(tc.desc, func(t *testing.T) { + c := setupClient(t, &mockAdminClock{getClusterResp: tc.cluster}) + + infos, err := c.Clusters(context.Background(), "myinst") + if err != nil { + t.Fatalf("Clusters failed: %v", err) + } + if len(infos) != 1 { + t.Fatalf("Clusters len: want = 1, got = %v", len(infos)) + } + + info := infos[0] + if gotConfig := info.AutoscalingConfig; !cmp.Equal(gotConfig, tc.wantConfig) { + t.Fatalf("want autoscaling config = %v, got = %v", tc.wantConfig, gotConfig) + } + }) + } +} + +func TestInstanceAdmin_SetAutoscaling(t *testing.T) { + mock := &mockAdminClock{} + c := setupClient(t, mock) + + err := c.SetAutoscaling(context.Background(), "myinst", "mycluster", AutoscalingConfig{ + MinNodes: 1, + MaxNodes: 2, + CPUTargetPercent: 10, + }) + if err != nil { + t.Fatalf("SetAutoscaling failed: %v", err) + } + + 
wantMask := []string{"cluster_config.cluster_autoscaling_config"} + if gotMask := mock.partialUpdateClusterReq.UpdateMask.Paths; !cmp.Equal(wantMask, gotMask) { + t.Fatalf("want update mask = %v, got = %v", wantMask, gotMask) + } + + wantName := "projects/my-cool-project/instances/myinst/clusters/mycluster" + if gotName := mock.partialUpdateClusterReq.Cluster.Name; gotName != wantName { + t.Fatalf("want name = %v, got = %v", wantName, gotName) + } + + cc := mock.partialUpdateClusterReq.Cluster.Config.(*btapb.Cluster_ClusterConfig_) + gotConfig := cc.ClusterConfig.ClusterAutoscalingConfig + + wantMin := int32(1) + if gotMin := gotConfig.AutoscalingLimits.MinServeNodes; wantMin != gotMin { + t.Fatalf("want autoscaling min nodes = %v, got = %v", wantMin, gotMin) + } + + wantMax := int32(2) + if gotMax := gotConfig.AutoscalingLimits.MaxServeNodes; wantMax != gotMax { + t.Fatalf("want autoscaling max nodes = %v, got = %v", wantMax, gotMax) + } + + wantCPU := int32(10) + if gotCPU := gotConfig.AutoscalingTargets.CpuUtilizationPercent; wantCPU != gotCPU { + t.Fatalf("want autoscaling cpu = %v, got = %v", wantCPU, gotCPU) + } +} + +func TestInstanceAdmin_UpdateCluster_RemovingAutoscaling(t *testing.T) { + mock := &mockAdminClock{} + c := setupClient(t, mock) + + err := c.UpdateCluster(context.Background(), "myinst", "mycluster", 1) + if err != nil { + t.Fatalf("UpdateCluster failed: %v", err) + } + + wantMask := []string{"serve_nodes", "cluster_config.cluster_autoscaling_config"} + if gotMask := mock.partialUpdateClusterReq.UpdateMask.Paths; !cmp.Equal(wantMask, gotMask) { + t.Fatalf("want update mask = %v, got = %v", wantMask, gotMask) + } + + if gotConfig := mock.partialUpdateClusterReq.Cluster.Config; gotConfig != nil { + t.Fatalf("want config = nil, got = %v", gotConfig) + } +} + +func TestInstanceAdmin_CreateInstance_WithAutoscaling(t *testing.T) { + mock := &mockAdminClock{} + c := setupClient(t, mock) + + err := c.CreateInstance(context.Background(), &InstanceConf{ 
+ InstanceId: "myinst", + DisplayName: "myinst", + InstanceType: PRODUCTION, + ClusterId: "mycluster", + Zone: "us-central1-a", + StorageType: SSD, + AutoscalingConfig: &AutoscalingConfig{MinNodes: 1, MaxNodes: 2, CPUTargetPercent: 10}, + }) + if err != nil { + t.Fatalf("CreateInstance failed: %v", err) + } + + mycc := mock.createInstanceReq.Clusters["mycluster"] + cc := mycc.Config.(*btapb.Cluster_ClusterConfig_) + gotConfig := cc.ClusterConfig.ClusterAutoscalingConfig + + wantMin := int32(1) + if gotMin := gotConfig.AutoscalingLimits.MinServeNodes; wantMin != gotMin { + t.Fatalf("want autoscaling min nodes = %v, got = %v", wantMin, gotMin) + } + + wantMax := int32(2) + if gotMax := gotConfig.AutoscalingLimits.MaxServeNodes; wantMax != gotMax { + t.Fatalf("want autoscaling max nodes = %v, got = %v", wantMax, gotMax) + } + + wantCPU := int32(10) + if gotCPU := gotConfig.AutoscalingTargets.CpuUtilizationPercent; wantCPU != gotCPU { + t.Fatalf("want autoscaling cpu = %v, got = %v", wantCPU, gotCPU) + } + + err = c.CreateInstance(context.Background(), &InstanceConf{ + InstanceId: "myinst", + DisplayName: "myinst", + InstanceType: PRODUCTION, + ClusterId: "mycluster", + Zone: "us-central1-a", + StorageType: SSD, + NumNodes: 1, + }) + if err != nil { + t.Fatalf("CreateInstance failed: %v", err) + } + + // omitting autoscaling config results in a nil config in the request + mycc = mock.createInstanceReq.Clusters["mycluster"] + if cc := mycc.GetClusterConfig(); cc != nil { + t.Fatalf("want config = nil, got = %v", gotConfig) + } +} + +func TestInstanceAdmin_CreateInstanceWithClusters_WithAutoscaling(t *testing.T) { + mock := &mockAdminClock{} + c := setupClient(t, mock) + + err := c.CreateInstanceWithClusters(context.Background(), &InstanceWithClustersConfig{ + InstanceID: "myinst", + DisplayName: "myinst", + InstanceType: PRODUCTION, + Clusters: []ClusterConfig{ + { + ClusterID: "mycluster", + Zone: "us-central1-a", + StorageType: SSD, + AutoscalingConfig: 
&AutoscalingConfig{MinNodes: 1, MaxNodes: 2, CPUTargetPercent: 10}, + }, + }, + }) + if err != nil { + t.Fatalf("CreateInstanceWithClusters failed: %v", err) + } + + mycc := mock.createInstanceReq.Clusters["mycluster"] + cc := mycc.Config.(*btapb.Cluster_ClusterConfig_) + gotConfig := cc.ClusterConfig.ClusterAutoscalingConfig + + wantMin := int32(1) + if gotMin := gotConfig.AutoscalingLimits.MinServeNodes; wantMin != gotMin { + t.Fatalf("want autoscaling min nodes = %v, got = %v", wantMin, gotMin) + } + + wantMax := int32(2) + if gotMax := gotConfig.AutoscalingLimits.MaxServeNodes; wantMax != gotMax { + t.Fatalf("want autoscaling max nodes = %v, got = %v", wantMax, gotMax) + } + + wantCPU := int32(10) + if gotCPU := gotConfig.AutoscalingTargets.CpuUtilizationPercent; wantCPU != gotCPU { + t.Fatalf("want autoscaling cpu = %v, got = %v", wantCPU, gotCPU) + } +} + +func TestInstanceAdmin_CreateCluster_WithAutoscaling(t *testing.T) { + mock := &mockAdminClock{} + c := setupClient(t, mock) + + err := c.CreateCluster(context.Background(), &ClusterConfig{ + ClusterID: "mycluster", + Zone: "us-central1-a", + StorageType: SSD, + AutoscalingConfig: &AutoscalingConfig{MinNodes: 1, MaxNodes: 2, CPUTargetPercent: 10}, + }) + if err != nil { + t.Fatalf("CreateCluster failed: %v", err) + } + + cc := mock.createClusterReq.Cluster.Config.(*btapb.Cluster_ClusterConfig_) + gotConfig := cc.ClusterConfig.ClusterAutoscalingConfig + + wantMin := int32(1) + if gotMin := gotConfig.AutoscalingLimits.MinServeNodes; wantMin != gotMin { + t.Fatalf("want autoscaling min nodes = %v, got = %v", wantMin, gotMin) + } + + wantMax := int32(2) + if gotMax := gotConfig.AutoscalingLimits.MaxServeNodes; wantMax != gotMax { + t.Fatalf("want autoscaling max nodes = %v, got = %v", wantMax, gotMax) + } + + wantCPU := int32(10) + if gotCPU := gotConfig.AutoscalingTargets.CpuUtilizationPercent; wantCPU != gotCPU { + t.Fatalf("want autoscaling cpu = %v, got = %v", wantCPU, gotCPU) + } + + err = 
c.CreateCluster(context.Background(), &ClusterConfig{ + ClusterID: "mycluster", + Zone: "us-central1-a", + StorageType: SSD, + NumNodes: 1, + }) + if err != nil { + t.Fatalf("CreateCluster failed: %v", err) + } + + // omitting autoscaling config results in a nil config in the request + if cc := mock.createClusterReq.Cluster.GetClusterConfig(); cc != nil { + t.Fatalf("want config = nil, got = %v", gotConfig) + } +} + +func TestInstanceAdmin_UpdateInstanceWithClusters_IgnoresInvalidClusters(t *testing.T) { + mock := &mockAdminClock{} + c := setupClient(t, mock) + + err := c.UpdateInstanceWithClusters(context.Background(), &InstanceWithClustersConfig{ + InstanceID: "myinst", + DisplayName: "myinst", + Clusters: []ClusterConfig{ + { + ClusterID: "mycluster", + Zone: "us-central1-a", + // Cluster has no autoscaling or num nodes + // It should be ignored + }, + }, + }) + if err != nil { + t.Fatalf("UpdateInstanceWithClusters failed: %v", err) + } + + if mock.partialUpdateClusterReq != nil { + t.Fatalf("PartialUpdateCluster should not have been called, got = %v", + mock.partialUpdateClusterReq) + } +} + +func TestInstanceAdmin_UpdateInstanceWithClusters_WithAutoscaling(t *testing.T) { + mock := &mockAdminClock{} + c := setupClient(t, mock) + + err := c.UpdateInstanceWithClusters(context.Background(), &InstanceWithClustersConfig{ + InstanceID: "myinst", + DisplayName: "myinst", + Clusters: []ClusterConfig{ + { + ClusterID: "mycluster", + Zone: "us-central1-a", + AutoscalingConfig: &AutoscalingConfig{MinNodes: 1, MaxNodes: 2, CPUTargetPercent: 10}, + }, + }, + }) + if err != nil { + t.Fatalf("UpdateInstanceWithClusters failed: %v", err) + } + + cc := mock.partialUpdateClusterReq.Cluster.Config.(*btapb.Cluster_ClusterConfig_) + gotConfig := cc.ClusterConfig.ClusterAutoscalingConfig + + wantMin := int32(1) + if gotMin := gotConfig.AutoscalingLimits.MinServeNodes; wantMin != gotMin { + t.Fatalf("want autoscaling min nodes = %v, got = %v", wantMin, gotMin) + } + + wantMax := 
int32(2) + if gotMax := gotConfig.AutoscalingLimits.MaxServeNodes; wantMax != gotMax { + t.Fatalf("want autoscaling max nodes = %v, got = %v", wantMax, gotMax) + } + + wantCPU := int32(10) + if gotCPU := gotConfig.AutoscalingTargets.CpuUtilizationPercent; wantCPU != gotCPU { + t.Fatalf("want autoscaling cpu = %v, got = %v", wantCPU, gotCPU) + } + + err = c.UpdateInstanceWithClusters(context.Background(), &InstanceWithClustersConfig{ + InstanceID: "myinst", + DisplayName: "myinst", + Clusters: []ClusterConfig{ + { + ClusterID: "mycluster", + Zone: "us-central1-a", + NumNodes: 1, + // no autoscaling config + }, + }, + }) + if err != nil { + t.Fatalf("UpdateInstanceWithClusters failed: %v", err) + } + + got := mock.partialUpdateClusterReq.Cluster.Config + if got != nil { + t.Fatalf("want autoscaling config = nil, got = %v", gotConfig) + } +} + +func TestInstanceAdmin_UpdateInstanceAndSyncClusters_WithAutoscaling(t *testing.T) { + mock := &mockAdminClock{ + getClusterResp: &btapb.Cluster{ + Name: ".../mycluster", + Location: ".../us-central1-a", + State: btapb.Cluster_READY, + DefaultStorageType: btapb.StorageType_SSD, + Config: &btapb.Cluster_ClusterConfig_{ + ClusterConfig: &btapb.Cluster_ClusterConfig{ + ClusterAutoscalingConfig: &btapb.Cluster_ClusterAutoscalingConfig{ + AutoscalingLimits: &btapb.AutoscalingLimits{ + MinServeNodes: 1, + MaxServeNodes: 2, + }, + AutoscalingTargets: &btapb.AutoscalingTargets{ + CpuUtilizationPercent: 10, + }, + }, + }, + }, + }, + } + c := setupClient(t, mock) + + _, err := UpdateInstanceAndSyncClusters(context.Background(), c, &InstanceWithClustersConfig{ + InstanceID: "myinst", + DisplayName: "myinst", + Clusters: []ClusterConfig{ + { + ClusterID: "mycluster", + Zone: "us-central1-a", + AutoscalingConfig: &AutoscalingConfig{MinNodes: 1, MaxNodes: 2, CPUTargetPercent: 10}, + }, + }, + }) + if err != nil { + t.Fatalf("UpdateInstanceAndSyncClusters failed: %v", err) + } + + cc := 
mock.partialUpdateClusterReq.Cluster.Config.(*btapb.Cluster_ClusterConfig_) + gotConfig := cc.ClusterConfig.ClusterAutoscalingConfig + + wantMin := int32(1) + if gotMin := gotConfig.AutoscalingLimits.MinServeNodes; wantMin != gotMin { + t.Fatalf("want autoscaling min nodes = %v, got = %v", wantMin, gotMin) + } + + wantMax := int32(2) + if gotMax := gotConfig.AutoscalingLimits.MaxServeNodes; wantMax != gotMax { + t.Fatalf("want autoscaling max nodes = %v, got = %v", wantMax, gotMax) + } + + wantCPU := int32(10) + if gotCPU := gotConfig.AutoscalingTargets.CpuUtilizationPercent; wantCPU != gotCPU { + t.Fatalf("want autoscaling cpu = %v, got = %v", wantCPU, gotCPU) + } + + _, err = UpdateInstanceAndSyncClusters(context.Background(), c, &InstanceWithClustersConfig{ + InstanceID: "myinst", + DisplayName: "myinst", + Clusters: []ClusterConfig{ + { + ClusterID: "mycluster", + Zone: "us-central1-a", + NumNodes: 1, + }, + }, + }) + if err != nil { + t.Fatalf("UpdateInstanceAndSyncClusters failed: %v", err) + } + got := mock.partialUpdateClusterReq.Cluster.Config + if got != nil { + t.Fatalf("want autoscaling config = nil, got = %v", gotConfig) + } +} diff --git a/bigtable/go.mod b/bigtable/go.mod index f95dad2e89c..73a50b8fb8a 100644 --- a/bigtable/go.mod +++ b/bigtable/go.mod @@ -12,7 +12,7 @@ require ( golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e google.golang.org/api v0.65.0 - google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 + google.golang.org/genproto v0.0.0-20220111164026-67b88f271998 google.golang.org/grpc v1.40.1 google.golang.org/protobuf v1.27.1 rsc.io/binaryregexp v0.2.0 diff --git a/bigtable/go.sum b/bigtable/go.sum index 9b56f52e831..60f596771a7 100644 --- a/bigtable/go.sum +++ b/bigtable/go.sum @@ -524,8 +524,9 @@ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 h1:Et6SkiuvnBn+SgrSYXs/BrUpGB4mbdwt4R3vaPIlicA= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220111164026-67b88f271998 h1:g/x+MYjJYDEP3OBCYYmwIbt4x6k3gryb+ohyOR7PXfI= +google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= diff --git a/bigtable/integration_test.go b/bigtable/integration_test.go index 2e2105e2646..e0c03252492 100644 --- a/bigtable/integration_test.go +++ b/bigtable/integration_test.go @@ -1982,10 +1982,125 @@ func TestIntegration_AdminUpdateInstanceAndSyncClusters(t *testing.T) { } } +func TestIntegration_Autoscaling(t *testing.T) { + if instanceToCreate == "" { + t.Skip("instanceToCreate not set, skipping instance update testing") + } + instanceToCreate += "4" + + testEnv, err := NewIntegrationEnv() + if err != nil { + t.Fatalf("IntegrationEnv: %v", err) + } + defer testEnv.Close() + + if !testEnv.Config().UseProd { + t.Skip("emulator doesn't support instance creation") + } + + timeout := 5 * time.Minute + ctx, _ := context.WithTimeout(context.Background(), timeout) + + iAdminClient, err := testEnv.NewInstanceAdminClient() + if err != nil { + t.Fatalf("NewInstanceAdminClient: %v", err) + } + defer iAdminClient.Close() + + clusterID := instanceToCreate + "-cluster" + + t.Log("creating an 
instance with autoscaling ON (Min = 3, Max = 4)") + conf := &InstanceConf{ + InstanceId: instanceToCreate, + ClusterId: clusterID, + DisplayName: "test instance", + Zone: instanceToCreateZone, + InstanceType: PRODUCTION, + AutoscalingConfig: &AutoscalingConfig{ + MinNodes: 3, + MaxNodes: 4, + CPUTargetPercent: 60, + }, + } + if err := iAdminClient.CreateInstance(ctx, conf); err != nil { + t.Fatalf("CreateInstance: %v", err) + } + defer iAdminClient.DeleteInstance(ctx, instanceToCreate) + + cluster, err := iAdminClient.GetCluster(ctx, instanceToCreate, clusterID) + if err != nil { + t.Fatalf("GetCluster: %v", err) + } + wantNodes := 3 + if gotNodes := cluster.ServeNodes; gotNodes != wantNodes { + t.Fatalf("want cluster nodes = %v, got = %v", wantNodes, gotNodes) + } + wantMin := 3 + if gotMin := cluster.AutoscalingConfig.MinNodes; gotMin != wantMin { + t.Fatalf("want cluster autoscaling min = %v, got = %v", wantMin, gotMin) + } + wantMax := 4 + if gotMax := cluster.AutoscalingConfig.MaxNodes; gotMax != wantMax { + t.Fatalf("want cluster autoscaling max = %v, got = %v", wantMax, gotMax) + } + wantCPU := 60 + if gotCPU := cluster.AutoscalingConfig.CPUTargetPercent; gotCPU != wantCPU { + t.Fatalf("want cluster autoscaling CPU target = %v, got = %v", wantCPU, gotCPU) + } + + serveNodes := 1 + t.Logf("setting autoscaling OFF and setting serve nodes to %v", serveNodes) + err = iAdminClient.UpdateCluster(ctx, instanceToCreate, clusterID, int32(serveNodes)) + if err != nil { + t.Fatalf("UpdateCluster: %v", err) + } + cluster, err = iAdminClient.GetCluster(ctx, instanceToCreate, clusterID) + if err != nil { + t.Fatalf("GetCluster: %v", err) + } + wantNodes = 1 + if gotNodes := cluster.ServeNodes; gotNodes != wantNodes { + t.Fatalf("want cluster nodes = %v, got = %v", wantNodes, gotNodes) + } + if gotAsc := cluster.AutoscalingConfig; gotAsc != nil { + t.Fatalf("want cluster autoscaling = nil, got = %v", gotAsc) + } + + ac := AutoscalingConfig{ + MinNodes: 3, + MaxNodes: 4, + 
CPUTargetPercent: 80, + } + t.Logf("setting autoscaling ON (Min = %v, Max = %v)", ac.MinNodes, ac.MaxNodes) + err = iAdminClient.SetAutoscaling(ctx, instanceToCreate, clusterID, ac) + if err != nil { + t.Fatalf("SetAutoscaling: %v", err) + } + cluster, err = iAdminClient.GetCluster(ctx, instanceToCreate, clusterID) + if err != nil { + t.Fatalf("GetCluster: %v", err) + } + wantMin = ac.MinNodes + if gotMin := cluster.AutoscalingConfig.MinNodes; gotMin != wantMin { + t.Fatalf("want cluster autoscaling min = %v, got = %v", wantMin, gotMin) + } + wantMax = ac.MaxNodes + if gotMax := cluster.AutoscalingConfig.MaxNodes; gotMax != wantMax { + t.Fatalf("want cluster autoscaling max = %v, got = %v", wantMax, gotMax) + } + wantCPU = ac.CPUTargetPercent + if gotCPU := cluster.AutoscalingConfig.CPUTargetPercent; gotCPU != wantCPU { + t.Fatalf("want cluster autoscaling CPU target = %v, got = %v", wantCPU, gotCPU) + } + +} + // instanceAdminClientMock is used to test FailedLocations field processing. type instanceAdminClientMock struct { Clusters []*btapb.Cluster UnavailableLocations []string + // Embedding the interface allows test writers to override just the methods + // that are interesting for a test and ignore the rest. btapb.BigtableInstanceAdminClient }