Skip to content

Commit

Permalink
tests: drop instance / cluster node counts for quota (#89)
Browse files Browse the repository at this point in the history
Toward #87
  • Loading branch information
tseaver committed Aug 5, 2020
1 parent 4796ac8 commit 89bdd32
Show file tree
Hide file tree
Showing 5 changed files with 19 additions and 14 deletions.
13 changes: 8 additions & 5 deletions docs/snippets.py
Expand Up @@ -114,7 +114,7 @@ def test_bigtable_create_instance():
my_instance_id = "inst-my-" + UNIQUE_SUFFIX
my_cluster_id = "clus-my-" + UNIQUE_SUFFIX
location_id = "us-central1-f"
serve_nodes = 3
serve_nodes = 1
storage_type = enums.StorageType.SSD
production = enums.Instance.Type.PRODUCTION
labels = {"prod-label": "prod-label"}
Expand Down Expand Up @@ -155,7 +155,7 @@ def test_bigtable_create_additional_cluster():

cluster_id = "clus-my-" + UNIQUE_SUFFIX
location_id = "us-central1-a"
serve_nodes = 3
serve_nodes = 1
storage_type = enums.StorageType.SSD

cluster = instance.cluster(
Expand Down Expand Up @@ -447,10 +447,11 @@ def test_bigtable_delete_cluster():
client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
cluster_id = "clus-my-" + UNIQUE_SUFFIX
serve_nodes = 1
cluster = instance.cluster(
cluster_id,
location_id=ALT_LOCATION_ID,
serve_nodes=SERVER_NODES,
serve_nodes=serve_nodes,
default_storage_type=STORAGE_TYPE,
)
operation = cluster.create()
Expand All @@ -477,10 +478,11 @@ def test_bigtable_delete_instance():

instance_id = "snipt-inst-del" + UNIQUE_SUFFIX
instance = client.instance(instance_id, instance_type=PRODUCTION, labels=LABELS)
serve_nodes = 1
cluster = instance.cluster(
"clus-to-delete" + UNIQUE_SUFFIX,
location_id=ALT_LOCATION_ID,
serve_nodes=1,
serve_nodes=serve_nodes,
default_storage_type=STORAGE_TYPE,
)
operation = instance.create(clusters=[cluster])
Expand Down Expand Up @@ -727,11 +729,12 @@ def test_bigtable_cluster_from_pb():

name = cluster.name
cluster_state = cluster.state
serve_nodes = 1
cluster_pb = instance_pb2.Cluster(
name=name,
location=LOCATION_ID,
state=cluster_state,
serve_nodes=SERVER_NODES,
serve_nodes=serve_nodes,
default_storage_type=STORAGE_TYPE,
)

Expand Down
4 changes: 2 additions & 2 deletions samples/instanceadmin/instanceadmin.py
Expand Up @@ -50,7 +50,7 @@ def run_instance_operations(project_id, instance_id):
'''
client = bigtable.Client(project=project_id, admin=True)
location_id = 'us-central1-f'
serve_nodes = 3
serve_nodes = 1
storage_type = enums.StorageType.SSD
production = enums.Instance.Type.PRODUCTION
labels = {'prod-label': 'prod-label'}
Expand Down Expand Up @@ -170,7 +170,7 @@ def add_cluster(project_id, instance_id, cluster_id):
instance = client.instance(instance_id)

location_id = 'us-central1-a'
serve_nodes = 3
serve_nodes = 1
storage_type = enums.StorageType.SSD

if not instance.exists():
Expand Down
2 changes: 1 addition & 1 deletion samples/metricscaler/metricscaler.py
Expand Up @@ -89,7 +89,7 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
# SSD clusters, and 8 TB for HDD. The
# "bigtable.googleapis.com/disk/bytes_used" metric is useful in figuring
# out the minimum number of nodes.
min_node_count = 3
min_node_count = 1

# The maximum number of nodes to use. The default maximum is 30 nodes per
# zone. If you need more quota, you can request more by following the
Expand Down
2 changes: 1 addition & 1 deletion samples/metricscaler/metricscaler_test.py
Expand Up @@ -61,7 +61,7 @@ def instance():

client = bigtable.Client(project=PROJECT, admin=True)

serve_nodes = 3
serve_nodes = 1
storage_type = enums.StorageType.SSD
production = enums.Instance.Type.PRODUCTION
labels = {'prod-label': 'prod-label'}
Expand Down
12 changes: 7 additions & 5 deletions tests/system.py
Expand Up @@ -188,8 +188,9 @@ def test_create_instance_defaults(self):
ALT_INSTANCE_ID = "ndef" + UNIQUE_SUFFIX
instance = Config.CLIENT.instance(ALT_INSTANCE_ID, labels=LABELS)
ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster"
serve_nodes = 1
cluster = instance.cluster(
ALT_CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES
ALT_CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=serve_nodes
)
operation = instance.create(clusters=[cluster])

Expand Down Expand Up @@ -267,16 +268,17 @@ def test_create_instance_w_two_clusters(self):
ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID + "-c2"
LOCATION_ID_2 = "us-central1-f"
STORAGE_TYPE = enums.StorageType.HDD
serve_nodes = 1
cluster_1 = instance.cluster(
ALT_CLUSTER_ID_1,
location_id=LOCATION_ID,
serve_nodes=SERVE_NODES,
serve_nodes=serve_nodes,
default_storage_type=STORAGE_TYPE,
)
cluster_2 = instance.cluster(
ALT_CLUSTER_ID_2,
location_id=LOCATION_ID_2,
serve_nodes=SERVE_NODES,
serve_nodes=serve_nodes,
default_storage_type=STORAGE_TYPE,
)
operation = instance.create(clusters=[cluster_1, cluster_2])
Expand Down Expand Up @@ -482,7 +484,7 @@ def test_update_type(self):
instance = Config.CLIENT.instance(
ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS
)
operation = instance.create(location_id=LOCATION_ID, serve_nodes=None)
operation = instance.create(location_id=LOCATION_ID)

# Make sure this instance gets deleted after the test case.
self.instances_to_delete.append(instance)
Expand Down Expand Up @@ -532,7 +534,7 @@ def test_create_cluster(self):

ALT_CLUSTER_ID = INSTANCE_ID + "-c2"
ALT_LOCATION_ID = "us-central1-f"
ALT_SERVE_NODES = 4
ALT_SERVE_NODES = 2

cluster_2 = Config.INSTANCE.cluster(
ALT_CLUSTER_ID,
Expand Down

0 comments on commit 89bdd32

Please sign in to comment.