Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

docs(samples): filter cpu query to get metrics for the correct resources #81

Merged
merged 2 commits into from Aug 4, 2020
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
27 changes: 17 additions & 10 deletions samples/metricscaler/metricscaler.py
Expand Up @@ -16,6 +16,7 @@
programmatically scale a Google Cloud Bigtable cluster."""

import argparse
import logging
import os
import time

Expand All @@ -26,8 +27,12 @@

# GCP project that owns the Bigtable instance; fail fast at import time if unset.
PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']

# Module-level logger writing to stderr via StreamHandler; INFO level so
# scaling decisions and detected metrics are visible when the sample runs.
logger = logging.getLogger('bigtable.metricscaler')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)

def get_cpu_load():

def get_cpu_load(bigtable_instance, bigtable_cluster):
"""Returns the most recent Cloud Bigtable CPU load measurement.

Returns:
Expand All @@ -40,12 +45,13 @@ def get_cpu_load():
metric_type='bigtable.googleapis.com/'
'cluster/cpu_load',
minutes=5)
cpu_query = cpu_query.select_resources(instance=bigtable_instance, cluster=bigtable_cluster)
cpu = next(cpu_query.iter())
return cpu.points[0].value.double_value
# [END bigtable_cpu]


def get_storage_utilization():
def get_storage_utilization(bigtable_instance, bigtable_cluster):
"""Returns the most recent Cloud Bigtable storage utilization measurement.

Returns:
Expand All @@ -58,6 +64,7 @@ def get_storage_utilization():
metric_type='bigtable.googleapis.com/'
'cluster/storage_utilization',
minutes=5)
utilization_query = utilization_query.select_resources(instance=bigtable_instance, cluster=bigtable_cluster)
utilization = next(utilization_query.iter())
return utilization.points[0].value.double_value
# [END bigtable_metric_scaler_storage_utilization]
Expand Down Expand Up @@ -111,15 +118,15 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
current_node_count + size_change_step, max_node_count)
cluster.serve_nodes = new_node_count
cluster.update()
print('Scaled up from {} to {} nodes.'.format(
logger.info('Scaled up from {} to {} nodes.'.format(
current_node_count, new_node_count))
else:
if current_node_count > min_node_count:
new_node_count = max(
current_node_count - size_change_step, min_node_count)
cluster.serve_nodes = new_node_count
cluster.update()
print('Scaled down from {} to {} nodes.'.format(
logger.info('Scaled down from {} to {} nodes.'.format(
current_node_count, new_node_count))
# [END bigtable_scale]

Expand All @@ -145,10 +152,10 @@ def main(
long_sleep (int): How long to sleep after the number of nodes is
changed
"""
cluster_cpu = get_cpu_load()
cluster_storage = get_storage_utilization()
print('Detected cpu of {}'.format(cluster_cpu))
print('Detected storage utilization of {}'.format(cluster_storage))
cluster_cpu = get_cpu_load(bigtable_instance, bigtable_cluster)
cluster_storage = get_storage_utilization(bigtable_instance, bigtable_cluster)
logger.info('Detected cpu of {}'.format(cluster_cpu))
logger.info('Detected storage utilization of {}'.format(cluster_storage))
try:
if cluster_cpu > high_cpu_threshold or cluster_storage > high_storage_threshold:
scale_bigtable(bigtable_instance, bigtable_cluster, True)
Expand All @@ -158,10 +165,10 @@ def main(
scale_bigtable(bigtable_instance, bigtable_cluster, False)
time.sleep(long_sleep)
else:
print('CPU within threshold, sleeping.')
logger.info('CPU within threshold, sleeping.')
time.sleep(short_sleep)
except Exception as e:
print("Error during scaling: %s", e)
logger.error("Error during scaling: %s", e)


if __name__ == '__main__':
Expand Down
20 changes: 15 additions & 5 deletions samples/metricscaler/metricscaler_test.py
Expand Up @@ -20,7 +20,7 @@

from google.cloud import bigtable
from google.cloud.bigtable import enums
from mock import patch
from mock import Mock, patch

import pytest

Expand All @@ -41,12 +41,18 @@
# System tests to verify API calls succeed


def test_get_cpu_load():
assert float(get_cpu_load()) > 0.0
@patch('metricscaler.query')
def test_get_cpu_load(query_mock):
    """get_cpu_load should surface the mocked CPU measurement as a float."""
    fake_measurement = Mock(points=[Mock(value=Mock(double_value=1.0))])
    query_mock.Query().select_resources().iter.return_value = iter([fake_measurement])
    assert float(get_cpu_load(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE)) > 0.0


def test_get_storage_utilization():
assert float(get_storage_utilization()) > 0.0
@patch('metricscaler.query')
def test_get_storage_utilization(query_mock):
    """get_storage_utilization should surface the mocked measurement as a float."""
    fake_measurement = Mock(points=[Mock(value=Mock(double_value=1.0))])
    query_mock.Query().select_resources().iter.return_value = iter([fake_measurement])
    assert float(get_storage_utilization(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE)) > 0.0


@pytest.fixture()
Expand Down Expand Up @@ -198,3 +204,7 @@ def test_main(scale_bigtable, get_cpu_load, get_storage_utilization, sleep):
scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE,
BIGTABLE_INSTANCE, True)
scale_bigtable.reset_mock()


if __name__ == '__main__':
    # Ad-hoc direct entry point that runs only the CPU-load test; the
    # @patch decorator injects the mock argument when called like this.
    # NOTE(review): normally tests run via pytest — presumably this exists
    # for quick manual checking; confirm it is intentional.
    test_get_cpu_load()