diff --git a/.coveragerc b/.coveragerc index 0d8e6297..ed75c17d 100644 --- a/.coveragerc +++ b/.coveragerc @@ -23,16 +23,14 @@ omit = [report] fail_under = 100 show_missing = True +omit = google/cloud/monitoring/__init__.py exclude_lines = # Re-enable the standard pragma pragma: NO COVER # Ignore debug-only repr def __repr__ - # Ignore abstract methods - raise NotImplementedError -omit = - */gapic/*.py - */proto/*.py - */core/*.py - */site-packages/*.py - google/cloud/__init__.py + # Ignore pkg_resources exceptions. + # This is added at the module level as a safeguard for if someone + # generates the code and tries to run it without pip installing. This + # makes it virtually impossible to test properly. + except pkg_resources.DistributionNotFound diff --git a/.kokoro/samples/python3.6/common.cfg b/.kokoro/samples/python3.6/common.cfg index 9ac55bd8..8011fbc2 100644 --- a/.kokoro/samples/python3.6/common.cfg +++ b/.kokoro/samples/python3.6/common.cfg @@ -13,6 +13,12 @@ env_vars: { value: "py-3.6" } +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py36" +} + env_vars: { key: "TRAMPOLINE_BUILD_FILE" value: "github/python-monitoring/.kokoro/test-samples.sh" @@ -24,12 +30,6 @@ env_vars: { value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" } -# Declare build specific Cloud project. -env_vars: { - key: "BUILD_SPECIFIC_GCLOUD_PROJECT" - value: "python-docs-samples-tests-py36" -} - # Download secrets for samples gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" diff --git a/.kokoro/samples/python3.7/common.cfg b/.kokoro/samples/python3.7/common.cfg index 1f5ce5da..700a0322 100644 --- a/.kokoro/samples/python3.7/common.cfg +++ b/.kokoro/samples/python3.7/common.cfg @@ -13,6 +13,12 @@ env_vars: { value: "py-3.7" } +# Declare build specific Cloud project. 
+env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py37" +} + env_vars: { key: "TRAMPOLINE_BUILD_FILE" value: "github/python-monitoring/.kokoro/test-samples.sh" @@ -24,12 +30,6 @@ env_vars: { value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" } -# Declare build specific Cloud project. -env_vars: { - key: "BUILD_SPECIFIC_GCLOUD_PROJECT" - value: "python-docs-samples-tests-py37" -} - # Download secrets for samples gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" diff --git a/.kokoro/samples/python3.8/common.cfg b/.kokoro/samples/python3.8/common.cfg index 816c3b79..c96cb4da 100644 --- a/.kokoro/samples/python3.8/common.cfg +++ b/.kokoro/samples/python3.8/common.cfg @@ -13,6 +13,12 @@ env_vars: { value: "py-3.8" } +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py38" +} + env_vars: { key: "TRAMPOLINE_BUILD_FILE" value: "github/python-monitoring/.kokoro/test-samples.sh" @@ -24,12 +30,6 @@ env_vars: { value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" } -# Declare build specific Cloud project. -env_vars: { - key: "BUILD_SPECIFIC_GCLOUD_PROJECT" - value: "python-docs-samples-tests-py38" -} - # Download secrets for samples gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" diff --git a/README.rst b/README.rst index 174171df..210b4875 100644 --- a/README.rst +++ b/README.rst @@ -51,11 +51,13 @@ dependencies. Supported Python Versions ^^^^^^^^^^^^^^^^^^^^^^^^^ -Python >= 3.5 +Python >= 3.6 -Deprecated Python Versions -^^^^^^^^^^^^^^^^^^^^^^^^^^ -Python == 2.7. Python 2.7 support will be removed on January 1, 2020. +Unsupported Python Versions +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Python == 2.7. + +The last version of this library compatible with Python 2.7 is google-cloud-monitoring==1.1.0. 
Mac/Linux diff --git a/UPGRADING.md b/UPGRADING.md new file mode 100644 index 00000000..6320fd92 --- /dev/null +++ b/UPGRADING.md @@ -0,0 +1,159 @@ +# 2.0.0 Migration Guide + +The 2.0 release of the `google-cloud-monitoring` client is a significant upgrade based on a [next-gen code generator](https://github.com/googleapis/gapic-generator-python), and includes substantial interface changes. Existing code written for earlier versions of this library will likely require updates to use this version. This document describes the changes that have been made, and what you need to do to update your usage. + +If you experience issues or have questions, please file an [issue](https://github.com/googleapis/python-monitoring/issues). + +## Supported Python Versions + +> **WARNING**: Breaking change + +The 2.0.0 release requires Python 3.6+. + + +## Method Calls + +> **WARNING**: Breaking change + +Methods expect request objects. We provide a script that will convert most common use cases. + +* Install the library + +```py +python3 -m pip install google-cloud-monitoring +``` + +* The script `fixup_monitoring_v3_keywords.py` is shipped with the library. It expects +an input directory (with the code to convert) and an empty destination directory. + +```sh +$ fixup_monitoring_v3_keywords.py --input-directory .samples/ --output-directory samples/ +``` + +**Before:** +```py +from google.cloud import monitoring_v3 + +client = monitoring_v3.MetricServiceClient() + +metric_descriptor = client.get_metric_descriptor("name") +``` + + +**After:** +```py +from google.cloud import monitoring_v3 + +client = monitoring_v3.MetricServiceClient() + +metric_descriptor = client.get_metric_descriptor(request={"name": "name"}) +``` + +### More Details + +In `google-cloud-monitoring<2.0.0`, parameters required by the API were positional parameters and optional parameters were keyword parameters. 
+ +**Before:** +```py + def create_metric_descriptor( + self, + name, + metric_descriptor, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): +``` + +In the 2.0.0 release, all methods have a single positional parameter `request`. Method docstrings indicate whether a parameter is required or optional. + +Some methods have additional keyword only parameters. The available parameters depend on the `google.api.method_signature` annotation specified by the API producer. + + +**After:** +```py + def create_metric_descriptor( + self, + request: metric_service.CreateMetricDescriptorRequest = None, + *, + name: str = None, + metric_descriptor: ga_metric.MetricDescriptor = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> ga_metric.MetricDescriptor: +``` + +> **NOTE:** The `request` parameter and flattened keyword parameters for the API are mutually exclusive. +> Passing both will result in an error. + + +Both of these calls are valid: + +```py +response = client.create_metric_descriptor( + request={ + "name": name, + "metric_descriptor": metric_descriptor + } +) +``` + +```py +response = client.create_metric_descriptor( + name=name, + metric_descriptor=metric_descriptor +) +``` + +This call is invalid because it mixes `request` with a keyword argument `metric_descriptor`. Executing this code +will result in an error. + +```py +response = client.create_metric_descriptor( + request={ + "name": name, + }, + metric_descriptor=metric_descriptor +) +``` + + + +## Enums and Types + + +> **WARNING**: Breaking change + +The submodules `enums` and `types` have been removed. 
+ +**Before:** +```py +from google.cloud import monitoring_v3 + +launch_stage = monitoring_v3.enums.LaunchStage.ALPHA +policy = monitoring_v3.types.AlertPolicy(name="name") +``` + + +**After:** +```py +from google.cloud import monitoring_v3 + +launch_stage = monitoring_v3.LaunchStage.ALPHA +policy = monitoring_v3.AlertPolicy(name="name") +``` + +## Project Path Helper Method + +`project_path` method is renamed `common_project_path`. + +**Before:** +```py +project_path = client.project_path("project_id") +``` + +**After:** +```py +project_path = client.common_project_path("project_id") +``` \ No newline at end of file diff --git a/docs/UPGRADING.md b/docs/UPGRADING.md new file mode 120000 index 00000000..01097c8c --- /dev/null +++ b/docs/UPGRADING.md @@ -0,0 +1 @@ +../UPGRADING.md \ No newline at end of file diff --git a/docs/gapic/v3/api.rst b/docs/gapic/v3/api.rst deleted file mode 100644 index 5eb2a6bb..00000000 --- a/docs/gapic/v3/api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Client for Cloud Monitoring API -=============================== - -.. automodule:: google.cloud.monitoring_v3 - :members: - :inherited-members: \ No newline at end of file diff --git a/docs/gapic/v3/types.rst b/docs/gapic/v3/types.rst deleted file mode 100644 index b1914656..00000000 --- a/docs/gapic/v3/types.rst +++ /dev/null @@ -1,5 +0,0 @@ -Types for Cloud Monitoring API Client -===================================== - -.. automodule:: google.cloud.monitoring_v3.types - :members: \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index df1ac5ee..27fc9024 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -8,8 +8,18 @@ Api Reference :maxdepth: 2 query.rst - gapic/v3/api - gapic/v3/types + monitoring_v3/services + monitoring_v3/types + +Migration Guide +--------------- + +See the guide below for instructions on migrating to the 2.x release of this library. + +.. 
toctree:: + :maxdepth: 2 + + UPGRADING Changelog --------- diff --git a/docs/monitoring_v3/services.rst b/docs/monitoring_v3/services.rst new file mode 100644 index 00000000..d6930ee6 --- /dev/null +++ b/docs/monitoring_v3/services.rst @@ -0,0 +1,21 @@ +Services for Google Cloud Monitoring v3 API +=========================================== + +.. automodule:: google.cloud.monitoring_v3.services.alert_policy_service + :members: + :inherited-members: +.. automodule:: google.cloud.monitoring_v3.services.group_service + :members: + :inherited-members: +.. automodule:: google.cloud.monitoring_v3.services.metric_service + :members: + :inherited-members: +.. automodule:: google.cloud.monitoring_v3.services.notification_channel_service + :members: + :inherited-members: +.. automodule:: google.cloud.monitoring_v3.services.service_monitoring_service + :members: + :inherited-members: +.. automodule:: google.cloud.monitoring_v3.services.uptime_check_service + :members: + :inherited-members: diff --git a/docs/monitoring_v3/types.rst b/docs/monitoring_v3/types.rst new file mode 100644 index 00000000..686802b5 --- /dev/null +++ b/docs/monitoring_v3/types.rst @@ -0,0 +1,5 @@ +Types for Google Cloud Monitoring v3 API +======================================== + +.. automodule:: google.cloud.monitoring_v3.types + :members: diff --git a/google/cloud/monitoring.py b/google/cloud/monitoring.py deleted file mode 100644 index 42c574b1..00000000 --- a/google/cloud/monitoring.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from __future__ import absolute_import - -from google.cloud.monitoring_v3 import AlertPolicyServiceClient -from google.cloud.monitoring_v3 import GroupServiceClient -from google.cloud.monitoring_v3 import MetricServiceClient -from google.cloud.monitoring_v3 import NotificationChannelServiceClient -from google.cloud.monitoring_v3 import ServiceMonitoringServiceClient -from google.cloud.monitoring_v3 import UptimeCheckServiceClient -from google.cloud.monitoring_v3 import enums -from google.cloud.monitoring_v3 import types - - -__all__ = ( - "enums", - "types", - "AlertPolicyServiceClient", - "GroupServiceClient", - "MetricServiceClient", - "NotificationChannelServiceClient", - "ServiceMonitoringServiceClient", - "UptimeCheckServiceClient", -) diff --git a/google/cloud/monitoring/__init__.py b/google/cloud/monitoring/__init__.py new file mode 100644 index 00000000..9b32a207 --- /dev/null +++ b/google/cloud/monitoring/__init__.py @@ -0,0 +1,318 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.cloud.monitoring_v3.services.alert_policy_service.async_client import ( + AlertPolicyServiceAsyncClient, +) +from google.cloud.monitoring_v3.services.alert_policy_service.client import ( + AlertPolicyServiceClient, +) +from google.cloud.monitoring_v3.services.group_service.async_client import ( + GroupServiceAsyncClient, +) +from google.cloud.monitoring_v3.services.group_service.client import GroupServiceClient +from google.cloud.monitoring_v3.services.metric_service.async_client import ( + MetricServiceAsyncClient, +) +from google.cloud.monitoring_v3.services.metric_service.client import ( + MetricServiceClient, +) +from google.cloud.monitoring_v3.services.notification_channel_service.async_client import ( + NotificationChannelServiceAsyncClient, +) +from google.cloud.monitoring_v3.services.notification_channel_service.client import ( + NotificationChannelServiceClient, +) +from google.cloud.monitoring_v3.services.service_monitoring_service.async_client import ( + ServiceMonitoringServiceAsyncClient, +) +from google.cloud.monitoring_v3.services.service_monitoring_service.client import ( + ServiceMonitoringServiceClient, +) +from google.cloud.monitoring_v3.services.uptime_check_service.async_client import ( + UptimeCheckServiceAsyncClient, +) +from google.cloud.monitoring_v3.services.uptime_check_service.client import ( + UptimeCheckServiceClient, +) +from google.cloud.monitoring_v3.types.alert import AlertPolicy +from google.cloud.monitoring_v3.types.alert_service import CreateAlertPolicyRequest +from google.cloud.monitoring_v3.types.alert_service import DeleteAlertPolicyRequest +from google.cloud.monitoring_v3.types.alert_service import GetAlertPolicyRequest +from google.cloud.monitoring_v3.types.alert_service import ListAlertPoliciesRequest +from google.cloud.monitoring_v3.types.alert_service import ListAlertPoliciesResponse +from google.cloud.monitoring_v3.types.alert_service import UpdateAlertPolicyRequest +from 
google.cloud.monitoring_v3.types.common import Aggregation +from google.cloud.monitoring_v3.types.common import ComparisonType +from google.cloud.monitoring_v3.types.common import ServiceTier +from google.cloud.monitoring_v3.types.common import TimeInterval +from google.cloud.monitoring_v3.types.common import TypedValue +from google.cloud.monitoring_v3.types.dropped_labels import DroppedLabels +from google.cloud.monitoring_v3.types.group import Group +from google.cloud.monitoring_v3.types.group_service import CreateGroupRequest +from google.cloud.monitoring_v3.types.group_service import DeleteGroupRequest +from google.cloud.monitoring_v3.types.group_service import GetGroupRequest +from google.cloud.monitoring_v3.types.group_service import ListGroupMembersRequest +from google.cloud.monitoring_v3.types.group_service import ListGroupMembersResponse +from google.cloud.monitoring_v3.types.group_service import ListGroupsRequest +from google.cloud.monitoring_v3.types.group_service import ListGroupsResponse +from google.cloud.monitoring_v3.types.group_service import UpdateGroupRequest +from google.cloud.monitoring_v3.types.metric import LabelValue +from google.cloud.monitoring_v3.types.metric import Point +from google.cloud.monitoring_v3.types.metric import QueryError +from google.cloud.monitoring_v3.types.metric import TextLocator +from google.cloud.monitoring_v3.types.metric import TimeSeries +from google.cloud.monitoring_v3.types.metric import TimeSeriesData +from google.cloud.monitoring_v3.types.metric import TimeSeriesDescriptor +from google.cloud.monitoring_v3.types.metric_service import ( + CreateMetricDescriptorRequest, +) +from google.cloud.monitoring_v3.types.metric_service import CreateTimeSeriesError +from google.cloud.monitoring_v3.types.metric_service import CreateTimeSeriesRequest +from google.cloud.monitoring_v3.types.metric_service import CreateTimeSeriesSummary +from google.cloud.monitoring_v3.types.metric_service import ( + DeleteMetricDescriptorRequest, 
+) +from google.cloud.monitoring_v3.types.metric_service import GetMetricDescriptorRequest +from google.cloud.monitoring_v3.types.metric_service import ( + GetMonitoredResourceDescriptorRequest, +) +from google.cloud.monitoring_v3.types.metric_service import ListMetricDescriptorsRequest +from google.cloud.monitoring_v3.types.metric_service import ( + ListMetricDescriptorsResponse, +) +from google.cloud.monitoring_v3.types.metric_service import ( + ListMonitoredResourceDescriptorsRequest, +) +from google.cloud.monitoring_v3.types.metric_service import ( + ListMonitoredResourceDescriptorsResponse, +) +from google.cloud.monitoring_v3.types.metric_service import ListTimeSeriesRequest +from google.cloud.monitoring_v3.types.metric_service import ListTimeSeriesResponse +from google.cloud.monitoring_v3.types.metric_service import QueryErrorList +from google.cloud.monitoring_v3.types.metric_service import QueryTimeSeriesRequest +from google.cloud.monitoring_v3.types.metric_service import QueryTimeSeriesResponse +from google.cloud.monitoring_v3.types.mutation_record import MutationRecord +from google.cloud.monitoring_v3.types.notification import NotificationChannel +from google.cloud.monitoring_v3.types.notification import NotificationChannelDescriptor +from google.cloud.monitoring_v3.types.notification_service import ( + CreateNotificationChannelRequest, +) +from google.cloud.monitoring_v3.types.notification_service import ( + DeleteNotificationChannelRequest, +) +from google.cloud.monitoring_v3.types.notification_service import ( + GetNotificationChannelDescriptorRequest, +) +from google.cloud.monitoring_v3.types.notification_service import ( + GetNotificationChannelRequest, +) +from google.cloud.monitoring_v3.types.notification_service import ( + GetNotificationChannelVerificationCodeRequest, +) +from google.cloud.monitoring_v3.types.notification_service import ( + GetNotificationChannelVerificationCodeResponse, +) +from 
google.cloud.monitoring_v3.types.notification_service import ( + ListNotificationChannelDescriptorsRequest, +) +from google.cloud.monitoring_v3.types.notification_service import ( + ListNotificationChannelDescriptorsResponse, +) +from google.cloud.monitoring_v3.types.notification_service import ( + ListNotificationChannelsRequest, +) +from google.cloud.monitoring_v3.types.notification_service import ( + ListNotificationChannelsResponse, +) +from google.cloud.monitoring_v3.types.notification_service import ( + SendNotificationChannelVerificationCodeRequest, +) +from google.cloud.monitoring_v3.types.notification_service import ( + UpdateNotificationChannelRequest, +) +from google.cloud.monitoring_v3.types.notification_service import ( + VerifyNotificationChannelRequest, +) +from google.cloud.monitoring_v3.types.service import BasicSli +from google.cloud.monitoring_v3.types.service import DistributionCut +from google.cloud.monitoring_v3.types.service import Range +from google.cloud.monitoring_v3.types.service import RequestBasedSli +from google.cloud.monitoring_v3.types.service import Service +from google.cloud.monitoring_v3.types.service import ServiceLevelIndicator +from google.cloud.monitoring_v3.types.service import ServiceLevelObjective +from google.cloud.monitoring_v3.types.service import TimeSeriesRatio +from google.cloud.monitoring_v3.types.service import WindowsBasedSli +from google.cloud.monitoring_v3.types.service_service import ( + CreateServiceLevelObjectiveRequest, +) +from google.cloud.monitoring_v3.types.service_service import CreateServiceRequest +from google.cloud.monitoring_v3.types.service_service import ( + DeleteServiceLevelObjectiveRequest, +) +from google.cloud.monitoring_v3.types.service_service import DeleteServiceRequest +from google.cloud.monitoring_v3.types.service_service import ( + GetServiceLevelObjectiveRequest, +) +from google.cloud.monitoring_v3.types.service_service import GetServiceRequest +from 
google.cloud.monitoring_v3.types.service_service import ( + ListServiceLevelObjectivesRequest, +) +from google.cloud.monitoring_v3.types.service_service import ( + ListServiceLevelObjectivesResponse, +) +from google.cloud.monitoring_v3.types.service_service import ListServicesRequest +from google.cloud.monitoring_v3.types.service_service import ListServicesResponse +from google.cloud.monitoring_v3.types.service_service import ( + UpdateServiceLevelObjectiveRequest, +) +from google.cloud.monitoring_v3.types.service_service import UpdateServiceRequest +from google.cloud.monitoring_v3.types.span_context import SpanContext +from google.cloud.monitoring_v3.types.uptime import GroupResourceType +from google.cloud.monitoring_v3.types.uptime import InternalChecker +from google.cloud.monitoring_v3.types.uptime import UptimeCheckConfig +from google.cloud.monitoring_v3.types.uptime import UptimeCheckIp +from google.cloud.monitoring_v3.types.uptime import UptimeCheckRegion +from google.cloud.monitoring_v3.types.uptime_service import ( + CreateUptimeCheckConfigRequest, +) +from google.cloud.monitoring_v3.types.uptime_service import ( + DeleteUptimeCheckConfigRequest, +) +from google.cloud.monitoring_v3.types.uptime_service import GetUptimeCheckConfigRequest +from google.cloud.monitoring_v3.types.uptime_service import ( + ListUptimeCheckConfigsRequest, +) +from google.cloud.monitoring_v3.types.uptime_service import ( + ListUptimeCheckConfigsResponse, +) +from google.cloud.monitoring_v3.types.uptime_service import ListUptimeCheckIpsRequest +from google.cloud.monitoring_v3.types.uptime_service import ListUptimeCheckIpsResponse +from google.cloud.monitoring_v3.types.uptime_service import ( + UpdateUptimeCheckConfigRequest, +) + +__all__ = ( + "Aggregation", + "AlertPolicy", + "AlertPolicyServiceAsyncClient", + "AlertPolicyServiceClient", + "BasicSli", + "ComparisonType", + "CreateAlertPolicyRequest", + "CreateGroupRequest", + "CreateMetricDescriptorRequest", + 
"CreateNotificationChannelRequest", + "CreateServiceLevelObjectiveRequest", + "CreateServiceRequest", + "CreateTimeSeriesError", + "CreateTimeSeriesRequest", + "CreateTimeSeriesSummary", + "CreateUptimeCheckConfigRequest", + "DeleteAlertPolicyRequest", + "DeleteGroupRequest", + "DeleteMetricDescriptorRequest", + "DeleteNotificationChannelRequest", + "DeleteServiceLevelObjectiveRequest", + "DeleteServiceRequest", + "DeleteUptimeCheckConfigRequest", + "DistributionCut", + "DroppedLabels", + "GetAlertPolicyRequest", + "GetGroupRequest", + "GetMetricDescriptorRequest", + "GetMonitoredResourceDescriptorRequest", + "GetNotificationChannelDescriptorRequest", + "GetNotificationChannelRequest", + "GetNotificationChannelVerificationCodeRequest", + "GetNotificationChannelVerificationCodeResponse", + "GetServiceLevelObjectiveRequest", + "GetServiceRequest", + "GetUptimeCheckConfigRequest", + "Group", + "GroupResourceType", + "GroupServiceAsyncClient", + "GroupServiceClient", + "InternalChecker", + "LabelValue", + "ListAlertPoliciesRequest", + "ListAlertPoliciesResponse", + "ListGroupMembersRequest", + "ListGroupMembersResponse", + "ListGroupsRequest", + "ListGroupsResponse", + "ListMetricDescriptorsRequest", + "ListMetricDescriptorsResponse", + "ListMonitoredResourceDescriptorsRequest", + "ListMonitoredResourceDescriptorsResponse", + "ListNotificationChannelDescriptorsRequest", + "ListNotificationChannelDescriptorsResponse", + "ListNotificationChannelsRequest", + "ListNotificationChannelsResponse", + "ListServiceLevelObjectivesRequest", + "ListServiceLevelObjectivesResponse", + "ListServicesRequest", + "ListServicesResponse", + "ListTimeSeriesRequest", + "ListTimeSeriesResponse", + "ListUptimeCheckConfigsRequest", + "ListUptimeCheckConfigsResponse", + "ListUptimeCheckIpsRequest", + "ListUptimeCheckIpsResponse", + "MetricServiceAsyncClient", + "MetricServiceClient", + "MutationRecord", + "NotificationChannel", + "NotificationChannelDescriptor", + 
"NotificationChannelServiceAsyncClient", + "NotificationChannelServiceClient", + "Point", + "QueryError", + "QueryErrorList", + "QueryTimeSeriesRequest", + "QueryTimeSeriesResponse", + "Range", + "RequestBasedSli", + "SendNotificationChannelVerificationCodeRequest", + "Service", + "ServiceLevelIndicator", + "ServiceLevelObjective", + "ServiceMonitoringServiceAsyncClient", + "ServiceMonitoringServiceClient", + "ServiceTier", + "SpanContext", + "TextLocator", + "TimeInterval", + "TimeSeries", + "TimeSeriesData", + "TimeSeriesDescriptor", + "TimeSeriesRatio", + "TypedValue", + "UpdateAlertPolicyRequest", + "UpdateGroupRequest", + "UpdateNotificationChannelRequest", + "UpdateServiceLevelObjectiveRequest", + "UpdateServiceRequest", + "UpdateUptimeCheckConfigRequest", + "UptimeCheckConfig", + "UptimeCheckIp", + "UptimeCheckRegion", + "UptimeCheckServiceAsyncClient", + "UptimeCheckServiceClient", + "VerifyNotificationChannelRequest", + "WindowsBasedSli", +) diff --git a/google/cloud/monitoring/py.typed b/google/cloud/monitoring/py.typed new file mode 100644 index 00000000..55d895b0 --- /dev/null +++ b/google/cloud/monitoring/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-monitoring package uses inline types. diff --git a/google/cloud/monitoring_v3/__init__.py b/google/cloud/monitoring_v3/__init__.py index 17105590..d4d15670 100644 --- a/google/cloud/monitoring_v3/__init__.py +++ b/google/cloud/monitoring_v3/__init__.py @@ -1,86 +1,225 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# - -from __future__ import absolute_import -import sys -import warnings - -from google.cloud.monitoring_v3 import types -from google.cloud.monitoring_v3.gapic import alert_policy_service_client -from google.cloud.monitoring_v3.gapic import enums -from google.cloud.monitoring_v3.gapic import group_service_client -from google.cloud.monitoring_v3.gapic import metric_service_client -from google.cloud.monitoring_v3.gapic import ( - notification_channel_service_client as notification_client, -) -from google.cloud.monitoring_v3.gapic import service_monitoring_service_client -from google.cloud.monitoring_v3.gapic import uptime_check_service_client - - -if sys.version_info[:2] == (2, 7): - message = ( - "A future version of this library will drop support for Python 2.7." 
- "More details about Python 2 support for Google Cloud Client Libraries" - "can be found at https://cloud.google.com/python/docs/python2-sunset/" - ) - warnings.warn(message, DeprecationWarning) - - -class AlertPolicyServiceClient(alert_policy_service_client.AlertPolicyServiceClient): - __doc__ = alert_policy_service_client.AlertPolicyServiceClient.__doc__ - enums = enums - - -class GroupServiceClient(group_service_client.GroupServiceClient): - __doc__ = group_service_client.GroupServiceClient.__doc__ - enums = enums - - -class MetricServiceClient(metric_service_client.MetricServiceClient): - __doc__ = metric_service_client.MetricServiceClient.__doc__ - enums = enums - - -class NotificationChannelServiceClient( - notification_client.NotificationChannelServiceClient -): - __doc__ = notification_client.NotificationChannelServiceClient.__doc__ - enums = enums - - -class ServiceMonitoringServiceClient( - service_monitoring_service_client.ServiceMonitoringServiceClient -): - __doc__ = service_monitoring_service_client.ServiceMonitoringServiceClient.__doc__ - enums = enums - - -class UptimeCheckServiceClient(uptime_check_service_client.UptimeCheckServiceClient): - __doc__ = uptime_check_service_client.UptimeCheckServiceClient.__doc__ - enums = enums +from .services.alert_policy_service import AlertPolicyServiceClient +from .services.group_service import GroupServiceClient +from .services.metric_service import MetricServiceClient +from .services.notification_channel_service import NotificationChannelServiceClient +from .services.service_monitoring_service import ServiceMonitoringServiceClient +from .services.uptime_check_service import UptimeCheckServiceClient +from .types.alert import AlertPolicy +from .types.alert_service import CreateAlertPolicyRequest +from .types.alert_service import DeleteAlertPolicyRequest +from .types.alert_service import GetAlertPolicyRequest +from .types.alert_service import ListAlertPoliciesRequest +from .types.alert_service import 
ListAlertPoliciesResponse +from .types.alert_service import UpdateAlertPolicyRequest +from .types.common import Aggregation +from .types.common import ComparisonType +from .types.common import ServiceTier +from .types.common import TimeInterval +from .types.common import TypedValue +from .types.dropped_labels import DroppedLabels +from .types.group import Group +from .types.group_service import CreateGroupRequest +from .types.group_service import DeleteGroupRequest +from .types.group_service import GetGroupRequest +from .types.group_service import ListGroupMembersRequest +from .types.group_service import ListGroupMembersResponse +from .types.group_service import ListGroupsRequest +from .types.group_service import ListGroupsResponse +from .types.group_service import UpdateGroupRequest +from .types.metric import LabelValue +from .types.metric import Point +from .types.metric import QueryError +from .types.metric import TextLocator +from .types.metric import TimeSeries +from .types.metric import TimeSeriesData +from .types.metric import TimeSeriesDescriptor +from .types.metric_service import CreateMetricDescriptorRequest +from .types.metric_service import CreateTimeSeriesError +from .types.metric_service import CreateTimeSeriesRequest +from .types.metric_service import CreateTimeSeriesSummary +from .types.metric_service import DeleteMetricDescriptorRequest +from .types.metric_service import GetMetricDescriptorRequest +from .types.metric_service import GetMonitoredResourceDescriptorRequest +from .types.metric_service import ListMetricDescriptorsRequest +from .types.metric_service import ListMetricDescriptorsResponse +from .types.metric_service import ListMonitoredResourceDescriptorsRequest +from .types.metric_service import ListMonitoredResourceDescriptorsResponse +from .types.metric_service import ListTimeSeriesRequest +from .types.metric_service import ListTimeSeriesResponse +from .types.metric_service import QueryErrorList +from .types.metric_service import 
QueryTimeSeriesRequest +from .types.metric_service import QueryTimeSeriesResponse +from .types.mutation_record import MutationRecord +from .types.notification import NotificationChannel +from .types.notification import NotificationChannelDescriptor +from .types.notification_service import CreateNotificationChannelRequest +from .types.notification_service import DeleteNotificationChannelRequest +from .types.notification_service import GetNotificationChannelDescriptorRequest +from .types.notification_service import GetNotificationChannelRequest +from .types.notification_service import GetNotificationChannelVerificationCodeRequest +from .types.notification_service import GetNotificationChannelVerificationCodeResponse +from .types.notification_service import ListNotificationChannelDescriptorsRequest +from .types.notification_service import ListNotificationChannelDescriptorsResponse +from .types.notification_service import ListNotificationChannelsRequest +from .types.notification_service import ListNotificationChannelsResponse +from .types.notification_service import SendNotificationChannelVerificationCodeRequest +from .types.notification_service import UpdateNotificationChannelRequest +from .types.notification_service import VerifyNotificationChannelRequest +from .types.service import BasicSli +from .types.service import DistributionCut +from .types.service import Range +from .types.service import RequestBasedSli +from .types.service import Service +from .types.service import ServiceLevelIndicator +from .types.service import ServiceLevelObjective +from .types.service import TimeSeriesRatio +from .types.service import WindowsBasedSli +from .types.service_service import CreateServiceLevelObjectiveRequest +from .types.service_service import CreateServiceRequest +from .types.service_service import DeleteServiceLevelObjectiveRequest +from .types.service_service import DeleteServiceRequest +from .types.service_service import GetServiceLevelObjectiveRequest +from 
.types.service_service import GetServiceRequest +from .types.service_service import ListServiceLevelObjectivesRequest +from .types.service_service import ListServiceLevelObjectivesResponse +from .types.service_service import ListServicesRequest +from .types.service_service import ListServicesResponse +from .types.service_service import UpdateServiceLevelObjectiveRequest +from .types.service_service import UpdateServiceRequest +from .types.span_context import SpanContext +from .types.uptime import GroupResourceType +from .types.uptime import InternalChecker +from .types.uptime import UptimeCheckConfig +from .types.uptime import UptimeCheckIp +from .types.uptime import UptimeCheckRegion +from .types.uptime_service import CreateUptimeCheckConfigRequest +from .types.uptime_service import DeleteUptimeCheckConfigRequest +from .types.uptime_service import GetUptimeCheckConfigRequest +from .types.uptime_service import ListUptimeCheckConfigsRequest +from .types.uptime_service import ListUptimeCheckConfigsResponse +from .types.uptime_service import ListUptimeCheckIpsRequest +from .types.uptime_service import ListUptimeCheckIpsResponse +from .types.uptime_service import UpdateUptimeCheckConfigRequest __all__ = ( - "enums", - "types", + "Aggregation", + "AlertPolicy", "AlertPolicyServiceClient", + "BasicSli", + "ComparisonType", + "CreateAlertPolicyRequest", + "CreateGroupRequest", + "CreateMetricDescriptorRequest", + "CreateNotificationChannelRequest", + "CreateServiceLevelObjectiveRequest", + "CreateServiceRequest", + "CreateTimeSeriesError", + "CreateTimeSeriesRequest", + "CreateTimeSeriesSummary", + "CreateUptimeCheckConfigRequest", + "DeleteAlertPolicyRequest", + "DeleteGroupRequest", + "DeleteMetricDescriptorRequest", + "DeleteNotificationChannelRequest", + "DeleteServiceLevelObjectiveRequest", + "DeleteServiceRequest", + "DeleteUptimeCheckConfigRequest", + "DistributionCut", + "DroppedLabels", + "GetAlertPolicyRequest", + "GetGroupRequest", + 
"GetMetricDescriptorRequest", + "GetMonitoredResourceDescriptorRequest", + "GetNotificationChannelDescriptorRequest", + "GetNotificationChannelRequest", + "GetNotificationChannelVerificationCodeRequest", + "GetNotificationChannelVerificationCodeResponse", + "GetServiceLevelObjectiveRequest", + "GetServiceRequest", + "GetUptimeCheckConfigRequest", + "Group", + "GroupResourceType", "GroupServiceClient", + "InternalChecker", + "LabelValue", + "ListAlertPoliciesRequest", + "ListAlertPoliciesResponse", + "ListGroupMembersRequest", + "ListGroupMembersResponse", + "ListGroupsRequest", + "ListGroupsResponse", + "ListMetricDescriptorsRequest", + "ListMetricDescriptorsResponse", + "ListMonitoredResourceDescriptorsRequest", + "ListMonitoredResourceDescriptorsResponse", + "ListNotificationChannelDescriptorsRequest", + "ListNotificationChannelDescriptorsResponse", + "ListNotificationChannelsRequest", + "ListNotificationChannelsResponse", + "ListServiceLevelObjectivesRequest", + "ListServiceLevelObjectivesResponse", + "ListServicesRequest", + "ListServicesResponse", + "ListTimeSeriesRequest", + "ListTimeSeriesResponse", + "ListUptimeCheckConfigsRequest", + "ListUptimeCheckConfigsResponse", + "ListUptimeCheckIpsRequest", + "ListUptimeCheckIpsResponse", "MetricServiceClient", + "MutationRecord", + "NotificationChannel", + "NotificationChannelDescriptor", "NotificationChannelServiceClient", + "Point", + "QueryError", + "QueryErrorList", + "QueryTimeSeriesRequest", + "QueryTimeSeriesResponse", + "Range", + "RequestBasedSli", + "SendNotificationChannelVerificationCodeRequest", + "Service", + "ServiceLevelIndicator", + "ServiceLevelObjective", "ServiceMonitoringServiceClient", + "ServiceTier", + "SpanContext", + "TextLocator", + "TimeInterval", + "TimeSeries", + "TimeSeriesData", + "TimeSeriesDescriptor", + "TimeSeriesRatio", + "TypedValue", + "UpdateAlertPolicyRequest", + "UpdateGroupRequest", + "UpdateNotificationChannelRequest", + "UpdateServiceLevelObjectiveRequest", + 
"UpdateServiceRequest", + "UpdateUptimeCheckConfigRequest", + "UptimeCheckConfig", + "UptimeCheckIp", + "UptimeCheckRegion", + "VerifyNotificationChannelRequest", + "WindowsBasedSli", "UptimeCheckServiceClient", ) diff --git a/google/cloud/monitoring_v3/gapic/__init__.py b/google/cloud/monitoring_v3/gapic/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/google/cloud/monitoring_v3/gapic/alert_policy_service_client.py b/google/cloud/monitoring_v3/gapic/alert_policy_service_client.py deleted file mode 100644 index 0e598c0e..00000000 --- a/google/cloud/monitoring_v3/gapic/alert_policy_service_client.py +++ /dev/null @@ -1,706 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.monitoring.v3 AlertPolicyService API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.page_iterator -import google.api_core.path_template -import grpc - -from google.cloud.monitoring_v3.gapic import alert_policy_service_client_config -from google.cloud.monitoring_v3.gapic import enums -from google.cloud.monitoring_v3.gapic.transports import ( - alert_policy_service_grpc_transport, -) -from google.cloud.monitoring_v3.proto import alert_pb2 -from google.cloud.monitoring_v3.proto import alert_service_pb2 -from google.cloud.monitoring_v3.proto import alert_service_pb2_grpc -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-monitoring" -).version - - -class AlertPolicyServiceClient(object): - """ - The AlertPolicyService API is used to manage (list, create, delete, - edit) alert policies in Stackdriver Monitoring. An alerting policy is a - description of the conditions under which some aspect of your system is - considered to be "unhealthy" and the ways to notify people or services - about this state. In addition to using this API, alert policies can also - be managed through `Stackdriver - Monitoring `__, which can be - reached by clicking the "Monitoring" tab in `Cloud - Console `__. - """ - - SERVICE_ADDRESS = "monitoring.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. 
- _INTERFACE_NAME = "google.monitoring.v3.AlertPolicyService" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AlertPolicyServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def alert_policy_path(cls, project, alert_policy): - """Return a fully-qualified alert_policy string.""" - return google.api_core.path_template.expand( - "projects/{project}/alertPolicies/{alert_policy}", - project=project, - alert_policy=alert_policy, - ) - - @classmethod - def alert_policy_condition_path(cls, project, alert_policy, condition): - """Return a fully-qualified alert_policy_condition string.""" - return google.api_core.path_template.expand( - "projects/{project}/alertPolicies/{alert_policy}/conditions/{condition}", - project=project, - alert_policy=alert_policy, - condition=condition, - ) - - @classmethod - def project_path(cls, project): - """Return a fully-qualified project string.""" - return google.api_core.path_template.expand( - "projects/{project}", project=project - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.AlertPolicyServiceGrpcTransport, - Callable[[~.Credentials, type], ~.AlertPolicyServiceGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. 
- This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. 
- if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = alert_policy_service_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=alert_policy_service_grpc_transport.AlertPolicyServiceGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = alert_policy_service_grpc_transport.AlertPolicyServiceGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] - ) - - # Save a dictionary of cached API call functions. 
- # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def list_alert_policies( - self, - name, - filter_=None, - order_by=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists the existing alerting policies for the project. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.AlertPolicyServiceClient() - >>> - >>> name = client.project_path('[PROJECT]') - >>> - >>> # Iterate over all results - >>> for element in client.list_alert_policies(name): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_alert_policies(name).pages: - ... for element in page: - ... # process element - ... pass - - Args: - name (str): Required. The project whose alert policies are to be listed. The - format is: - - projects/[PROJECT_ID_OR_NUMBER] - - Note that this field names the parent container in which the alerting - policies to be listed are stored. To retrieve a single alerting policy - by name, use the ``GetAlertPolicy`` operation, instead. - filter_ (str): If provided, this field specifies the criteria that must be met by - alert policies to be included in the response. - - For more details, see `sorting and - filtering `__. - order_by (str): A comma-separated list of fields by which to sort the result. - Supports the same set of field references as the ``filter`` field. - Entries can be prefixed with a minus sign to sort by the field in - descending order. - - For more details, see `sorting and - filtering `__. - page_size (int): The maximum number of resources contained in the - underlying API response. 
If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.monitoring_v3.types.AlertPolicy` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "list_alert_policies" not in self._inner_api_calls: - self._inner_api_calls[ - "list_alert_policies" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_alert_policies, - default_retry=self._method_configs["ListAlertPolicies"].retry, - default_timeout=self._method_configs["ListAlertPolicies"].timeout, - client_info=self._client_info, - ) - - request = alert_service_pb2.ListAlertPoliciesRequest( - name=name, filter=filter_, order_by=order_by, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_alert_policies"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="alert_policies", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_alert_policy( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a single alerting policy. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.AlertPolicyServiceClient() - >>> - >>> name = client.alert_policy_path('[PROJECT]', '[ALERT_POLICY]') - >>> - >>> response = client.get_alert_policy(name) - - Args: - name (str): Required. The alerting policy to retrieve. The format is: - - projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.AlertPolicy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "get_alert_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "get_alert_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_alert_policy, - default_retry=self._method_configs["GetAlertPolicy"].retry, - default_timeout=self._method_configs["GetAlertPolicy"].timeout, - client_info=self._client_info, - ) - - request = alert_service_pb2.GetAlertPolicyRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_alert_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_alert_policy( - self, - name, - alert_policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new alerting policy. 
- - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.AlertPolicyServiceClient() - >>> - >>> name = client.project_path('[PROJECT]') - >>> - >>> # TODO: Initialize `alert_policy`: - >>> alert_policy = {} - >>> - >>> response = client.create_alert_policy(name, alert_policy) - - Args: - name (str): Required. The project in which to create the alerting policy. The - format is: - - projects/[PROJECT_ID_OR_NUMBER] - - Note that this field names the parent container in which the alerting - policy will be written, not the name of the created policy. The alerting - policy that is returned will have a name that contains a normalized - representation of this name as a prefix but adds a suffix of the form - ``/alertPolicies/[ALERT_POLICY_ID]``, identifying the policy in the - container. - alert_policy (Union[dict, ~google.cloud.monitoring_v3.types.AlertPolicy]): Required. The requested alerting policy. You should omit the - ``name`` field in this policy. The name will be returned in the new - policy, including a new ``[ALERT_POLICY_ID]`` value. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.AlertPolicy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.AlertPolicy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. 
- ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "create_alert_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "create_alert_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_alert_policy, - default_retry=self._method_configs["CreateAlertPolicy"].retry, - default_timeout=self._method_configs["CreateAlertPolicy"].timeout, - client_info=self._client_info, - ) - - request = alert_service_pb2.CreateAlertPolicyRequest( - name=name, alert_policy=alert_policy - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_alert_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_alert_policy( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes an alerting policy. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.AlertPolicyServiceClient() - >>> - >>> name = client.alert_policy_path('[PROJECT]', '[ALERT_POLICY]') - >>> - >>> client.delete_alert_policy(name) - - Args: - name (str): Required. The alerting policy to delete. The format is: - - projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] - - For more information, see ``AlertPolicy``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. 
Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "delete_alert_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_alert_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_alert_policy, - default_retry=self._method_configs["DeleteAlertPolicy"].retry, - default_timeout=self._method_configs["DeleteAlertPolicy"].timeout, - client_info=self._client_info, - ) - - request = alert_service_pb2.DeleteAlertPolicyRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_alert_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_alert_policy( - self, - alert_policy, - update_mask=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates an alerting policy. You can either replace the entire policy - with a new one or replace only certain fields in the current alerting - policy by specifying the fields to be updated via ``updateMask``. - Returns the updated alerting policy. 
- - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.AlertPolicyServiceClient() - >>> - >>> # TODO: Initialize `alert_policy`: - >>> alert_policy = {} - >>> - >>> response = client.update_alert_policy(alert_policy) - - Args: - alert_policy (Union[dict, ~google.cloud.monitoring_v3.types.AlertPolicy]): Required. The updated alerting policy or the updated values for the - fields listed in ``update_mask``. If ``update_mask`` is not empty, any - fields in this policy that are not in ``update_mask`` are ignored. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.AlertPolicy` - update_mask (Union[dict, ~google.cloud.monitoring_v3.types.FieldMask]): Optional. A list of alerting policy field names. If this field is - not empty, each listed field in the existing alerting policy is set to - the value of the corresponding field in the supplied policy - (``alert_policy``), or to the field's default value if the field is not - in the supplied alerting policy. Fields not listed retain their previous - value. - - Examples of valid field masks include ``display_name``, - ``documentation``, ``documentation.content``, - ``documentation.mime_type``, ``user_labels``, ``user_label.nameofkey``, - ``enabled``, ``conditions``, ``combiner``, etc. - - If this field is empty, then the supplied alerting policy replaces the - existing policy. It is the same as deleting the existing policy and - adding the supplied policy, except for the following: - - - The new policy will have the same ``[ALERT_POLICY_ID]`` as the former - policy. This gives you continuity with the former policy in your - notifications and incidents. - - Conditions in the new policy will keep their former - ``[CONDITION_ID]`` if the supplied condition includes the ``name`` - field with that ``[CONDITION_ID]``. If the supplied condition omits - the ``name`` field, then a new ``[CONDITION_ID]`` is created. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.AlertPolicy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "update_alert_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "update_alert_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_alert_policy, - default_retry=self._method_configs["UpdateAlertPolicy"].retry, - default_timeout=self._method_configs["UpdateAlertPolicy"].timeout, - client_info=self._client_info, - ) - - request = alert_service_pb2.UpdateAlertPolicyRequest( - alert_policy=alert_policy, update_mask=update_mask - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("alert_policy.name", alert_policy.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_alert_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/monitoring_v3/gapic/alert_policy_service_client_config.py b/google/cloud/monitoring_v3/gapic/alert_policy_service_client_config.py deleted file mode 100644 index 5aed862a..00000000 --- a/google/cloud/monitoring_v3/gapic/alert_policy_service_client_config.py +++ /dev/null @@ -1,48 +0,0 @@ -config = { - "interfaces": { - "google.monitoring.v3.AlertPolicyService": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } - }, - "methods": { - "ListAlertPolicies": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetAlertPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "CreateAlertPolicy": { - 
"timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "DeleteAlertPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "UpdateAlertPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git a/google/cloud/monitoring_v3/gapic/enums.py b/google/cloud/monitoring_v3/gapic/enums.py deleted file mode 100644 index 5a99006b..00000000 --- a/google/cloud/monitoring_v3/gapic/enums.py +++ /dev/null @@ -1,670 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class CalendarPeriod(enum.IntEnum): - """ - A ``CalendarPeriod`` represents the abstract concept of a time - period that has a canonical start. Grammatically, "the start of the - current ``CalendarPeriod``." All calendar times begin at midnight UTC. - - Attributes: - CALENDAR_PERIOD_UNSPECIFIED (int): Undefined period, raises an error. - DAY (int): A day. - WEEK (int): A week. Weeks begin on Monday, following `ISO - 8601 `__. - FORTNIGHT (int): A fortnight. The first calendar fortnight of the year begins at the - start of week 1 according to `ISO - 8601 `__. - MONTH (int): A month. - QUARTER (int): A quarter. Quarters start on dates 1-Jan, 1-Apr, 1-Jul, and 1-Oct of each - year. 
- HALF (int): A half-year. Half-years start on dates 1-Jan and 1-Jul. - YEAR (int): A year. - """ - - CALENDAR_PERIOD_UNSPECIFIED = 0 - DAY = 1 - WEEK = 2 - FORTNIGHT = 3 - MONTH = 4 - QUARTER = 5 - HALF = 6 - YEAR = 7 - - -class ComparisonType(enum.IntEnum): - """ - Specifies an ordering relationship on two arguments, called ``left`` - and ``right``. - - Attributes: - COMPARISON_UNSPECIFIED (int): No ordering relationship is specified. - COMPARISON_GT (int): True if the left argument is greater than the right argument. - COMPARISON_GE (int): True if the left argument is greater than or equal to the right argument. - COMPARISON_LT (int): True if the left argument is less than the right argument. - COMPARISON_LE (int): True if the left argument is less than or equal to the right argument. - COMPARISON_EQ (int): True if the left argument is equal to the right argument. - COMPARISON_NE (int): True if the left argument is not equal to the right argument. - """ - - COMPARISON_UNSPECIFIED = 0 - COMPARISON_GT = 1 - COMPARISON_GE = 2 - COMPARISON_LT = 3 - COMPARISON_LE = 4 - COMPARISON_EQ = 5 - COMPARISON_NE = 6 - - -class GroupResourceType(enum.IntEnum): - """ - The supported resource types that can be used as values of - ``group_resource.resource_type``. ``INSTANCE`` includes ``gce_instance`` - and ``aws_ec2_instance`` resource types. The resource types ``gae_app`` - and ``uptime_url`` are not valid here because group checks on App Engine - modules and URLs are not allowed. - - Attributes: - RESOURCE_TYPE_UNSPECIFIED (int): Default value (not valid). - INSTANCE (int): A group of instances from Google Cloud Platform (GCP) or - Amazon Web Services (AWS). - AWS_ELB_LOAD_BALANCER (int): A group of Amazon ELB load balancers. - """ - - RESOURCE_TYPE_UNSPECIFIED = 0 - INSTANCE = 1 - AWS_ELB_LOAD_BALANCER = 2 - - -class LaunchStage(enum.IntEnum): - """ - The launch stage as defined by `Google Cloud Platform Launch - Stages `__. 
- - Attributes: - LAUNCH_STAGE_UNSPECIFIED (int): Do not use this default value. - EARLY_ACCESS (int): Early Access features are limited to a closed group of testers. To use - these features, you must sign up in advance and sign a Trusted Tester - agreement (which includes confidentiality provisions). These features may - be unstable, changed in backward-incompatible ways, and are not - guaranteed to be released. - ALPHA (int): Alpha is a limited availability test for releases before they are cleared - for widespread use. By Alpha, all significant design issues are resolved - and we are in the process of verifying functionality. Alpha customers - need to apply for access, agree to applicable terms, and have their - projects whitelisted. Alpha releases don’t have to be feature complete, - no SLAs are provided, and there are no technical support obligations, but - they will be far enough along that customers can actually use them in - test environments or for limited-use tests -- just like they would in - normal production cases. - BETA (int): Beta is the point at which we are ready to open a release for any - customer to use. There are no SLA or technical support obligations in a - Beta release. Products will be complete from a feature perspective, but - may have some open outstanding issues. Beta releases are suitable for - limited production use cases. - GA (int): GA features are open to all developers and are considered stable and - fully qualified for production use. - DEPRECATED (int): Deprecated features are scheduled to be shut down and removed. For - more information, see the “Deprecation Policy” section of our `Terms of - Service `__ and the `Google Cloud - Platform Subject to the Deprecation - Policy `__ documentation. 
- """ - - LAUNCH_STAGE_UNSPECIFIED = 0 - EARLY_ACCESS = 1 - ALPHA = 2 - BETA = 3 - GA = 4 - DEPRECATED = 5 - - -class NullValue(enum.IntEnum): - """ - ``NullValue`` is a singleton enumeration to represent the null value - for the ``Value`` type union. - - The JSON representation for ``NullValue`` is JSON ``null``. - - Attributes: - NULL_VALUE (int): Null value. - """ - - NULL_VALUE = 0 - - -class ServiceTier(enum.IntEnum): - """ - The tier of service for a Workspace. Please see the `service tiers - documentation `__ - for more details. - - Attributes: - SERVICE_TIER_UNSPECIFIED (int): An invalid sentinel value, used to indicate that a tier has not - been provided explicitly. - SERVICE_TIER_BASIC (int): The Stackdriver Basic tier, a free tier of service that provides - basic features, a moderate allotment of logs, and access to built-in - metrics. A number of features are not available in this tier. For more - details, see `the service tiers - documentation `__. - SERVICE_TIER_PREMIUM (int): The Stackdriver Premium tier, a higher, more expensive tier of - service that provides access to all Stackdriver features, lets you use - Stackdriver with AWS accounts, and has a larger allotments for logs and - metrics. For more details, see `the service tiers - documentation `__. - """ - - SERVICE_TIER_UNSPECIFIED = 0 - SERVICE_TIER_BASIC = 1 - SERVICE_TIER_PREMIUM = 2 - - -class UptimeCheckRegion(enum.IntEnum): - """ - The regions from which an Uptime check can be run. - - Attributes: - REGION_UNSPECIFIED (int): Default value if no region is specified. Will result in Uptime checks - running from all regions. - USA (int): Allows checks to run from locations within the United States of America. - EUROPE (int): Allows checks to run from locations within the continent of Europe. - SOUTH_AMERICA (int): Allows checks to run from locations within the continent of South - America. 
- ASIA_PACIFIC (int): Allows checks to run from locations within the Asia Pacific area (ex: - Singapore). - """ - - REGION_UNSPECIFIED = 0 - USA = 1 - EUROPE = 2 - SOUTH_AMERICA = 3 - ASIA_PACIFIC = 4 - - -class Aggregation(object): - class Aligner(enum.IntEnum): - """ - The ``Aligner`` specifies the operation that will be applied to the - data points in each alignment period in a time series. Except for - ``ALIGN_NONE``, which specifies that no operation be applied, each - alignment operation replaces the set of data values in each alignment - period with a single value: the result of applying the operation to the - data values. An aligned time series has a single data value at the end - of each ``alignment_period``. - - An alignment operation can change the data type of the values, too. For - example, if you apply a counting operation to boolean values, the data - ``value_type`` in the original time series is ``BOOLEAN``, but the - ``value_type`` in the aligned result is ``INT64``. - - Attributes: - ALIGN_NONE (int): No alignment. Raw data is returned. Not valid if cross-series - reduction is requested. The ``value_type`` of the result is the same as - the ``value_type`` of the input. - ALIGN_DELTA (int): Align and convert to ``DELTA``. The output is ``delta = y1 - y0``. - - This alignment is valid for ``CUMULATIVE`` and ``DELTA`` metrics. If the - selected alignment period results in periods with no data, then the - aligned value for such a period is created by interpolation. The - ``value_type`` of the aligned result is the same as the ``value_type`` - of the input. - ALIGN_RATE (int): Align and convert to a rate. The result is computed as - ``rate = (y1 - y0)/(t1 - t0)``, or "delta over time". Think of this - aligner as providing the slope of the line that passes through the value - at the start and at the end of the ``alignment_period``. - - This aligner is valid for ``CUMULATIVE`` and ``DELTA`` metrics with - numeric values. 
If the selected alignment period results in periods with - no data, then the aligned value for such a period is created by - interpolation. The output is a ``GAUGE`` metric with ``value_type`` - ``DOUBLE``. - - If, by "rate", you mean "percentage change", see the - ``ALIGN_PERCENT_CHANGE`` aligner instead. - ALIGN_INTERPOLATE (int): Align by interpolating between adjacent points around the alignment - period boundary. This aligner is valid for ``GAUGE`` metrics with - numeric values. The ``value_type`` of the aligned result is the same as - the ``value_type`` of the input. - ALIGN_NEXT_OLDER (int): Align by moving the most recent data point before the end of the - alignment period to the boundary at the end of the alignment period. - This aligner is valid for ``GAUGE`` metrics. The ``value_type`` of the - aligned result is the same as the ``value_type`` of the input. - ALIGN_MIN (int): Align the time series by returning the minimum value in each - alignment period. This aligner is valid for ``GAUGE`` and ``DELTA`` - metrics with numeric values. The ``value_type`` of the aligned result is - the same as the ``value_type`` of the input. - ALIGN_MAX (int): Align the time series by returning the maximum value in each - alignment period. This aligner is valid for ``GAUGE`` and ``DELTA`` - metrics with numeric values. The ``value_type`` of the aligned result is - the same as the ``value_type`` of the input. - ALIGN_MEAN (int): Align the time series by returning the mean value in each alignment - period. This aligner is valid for ``GAUGE`` and ``DELTA`` metrics with - numeric values. The ``value_type`` of the aligned result is ``DOUBLE``. - ALIGN_COUNT (int): Align the time series by returning the number of values in each - alignment period. This aligner is valid for ``GAUGE`` and ``DELTA`` - metrics with numeric or Boolean values. The ``value_type`` of the - aligned result is ``INT64``. 
- ALIGN_SUM (int): Align the time series by returning the sum of the values in each - alignment period. This aligner is valid for ``GAUGE`` and ``DELTA`` - metrics with numeric and distribution values. The ``value_type`` of the - aligned result is the same as the ``value_type`` of the input. - ALIGN_STDDEV (int): Align the time series by returning the standard deviation of the - values in each alignment period. This aligner is valid for ``GAUGE`` and - ``DELTA`` metrics with numeric values. The ``value_type`` of the output - is ``DOUBLE``. - ALIGN_COUNT_TRUE (int): Align the time series by returning the number of ``True`` values in - each alignment period. This aligner is valid for ``GAUGE`` metrics with - Boolean values. The ``value_type`` of the output is ``INT64``. - ALIGN_COUNT_FALSE (int): Align the time series by returning the number of ``False`` values in - each alignment period. This aligner is valid for ``GAUGE`` metrics with - Boolean values. The ``value_type`` of the output is ``INT64``. - ALIGN_FRACTION_TRUE (int): Align the time series by returning the ratio of the number of - ``True`` values to the total number of values in each alignment period. - This aligner is valid for ``GAUGE`` metrics with Boolean values. The - output value is in the range [0.0, 1.0] and has ``value_type`` - ``DOUBLE``. - ALIGN_PERCENTILE_99 (int): Align the time series by using `percentile - aggregation `__. The resulting - data point in each alignment period is the 99th percentile of all data - points in the period. This aligner is valid for ``GAUGE`` and ``DELTA`` - metrics with distribution values. The output is a ``GAUGE`` metric with - ``value_type`` ``DOUBLE``. - ALIGN_PERCENTILE_95 (int): Align the time series by using `percentile - aggregation `__. The resulting - data point in each alignment period is the 95th percentile of all data - points in the period. This aligner is valid for ``GAUGE`` and ``DELTA`` - metrics with distribution values. 
The output is a ``GAUGE`` metric with - ``value_type`` ``DOUBLE``. - ALIGN_PERCENTILE_50 (int): Align the time series by using `percentile - aggregation `__. The resulting - data point in each alignment period is the 50th percentile of all data - points in the period. This aligner is valid for ``GAUGE`` and ``DELTA`` - metrics with distribution values. The output is a ``GAUGE`` metric with - ``value_type`` ``DOUBLE``. - ALIGN_PERCENTILE_05 (int): Align the time series by using `percentile - aggregation `__. The resulting - data point in each alignment period is the 5th percentile of all data - points in the period. This aligner is valid for ``GAUGE`` and ``DELTA`` - metrics with distribution values. The output is a ``GAUGE`` metric with - ``value_type`` ``DOUBLE``. - ALIGN_PERCENT_CHANGE (int): Align and convert to a percentage change. This aligner is valid for - ``GAUGE`` and ``DELTA`` metrics with numeric values. This alignment - returns ``((current - previous)/previous) * 100``, where the value of - ``previous`` is determined based on the ``alignment_period``. - - If the values of ``current`` and ``previous`` are both 0, then the - returned value is 0. If only ``previous`` is 0, the returned value is - infinity. - - A 10-minute moving mean is computed at each point of the alignment - period prior to the above calculation to smooth the metric and prevent - false positives from very short-lived spikes. The moving mean is only - applicable for data whose values are ``>= 0``. Any values ``< 0`` are - treated as a missing datapoint, and are ignored. While ``DELTA`` metrics - are accepted by this alignment, special care should be taken that the - values for the metric will always be positive. The output is a ``GAUGE`` - metric with ``value_type`` ``DOUBLE``. 
- """ - - ALIGN_NONE = 0 - ALIGN_DELTA = 1 - ALIGN_RATE = 2 - ALIGN_INTERPOLATE = 3 - ALIGN_NEXT_OLDER = 4 - ALIGN_MIN = 10 - ALIGN_MAX = 11 - ALIGN_MEAN = 12 - ALIGN_COUNT = 13 - ALIGN_SUM = 14 - ALIGN_STDDEV = 15 - ALIGN_COUNT_TRUE = 16 - ALIGN_COUNT_FALSE = 24 - ALIGN_FRACTION_TRUE = 17 - ALIGN_PERCENTILE_99 = 18 - ALIGN_PERCENTILE_95 = 19 - ALIGN_PERCENTILE_50 = 20 - ALIGN_PERCENTILE_05 = 21 - ALIGN_PERCENT_CHANGE = 23 - - class Reducer(enum.IntEnum): - """ - A Reducer operation describes how to aggregate data points from multiple - time series into a single time series, where the value of each data point - in the resulting series is a function of all the already aligned values in - the input time series. - - Attributes: - REDUCE_NONE (int): No cross-time series reduction. The output of the ``Aligner`` is - returned. - REDUCE_MEAN (int): Reduce by computing the mean value across time series for each - alignment period. This reducer is valid for ``DELTA`` and ``GAUGE`` - metrics with numeric or distribution values. The ``value_type`` of the - output is ``DOUBLE``. - REDUCE_MIN (int): Reduce by computing the minimum value across time series for each - alignment period. This reducer is valid for ``DELTA`` and ``GAUGE`` - metrics with numeric values. The ``value_type`` of the output is the - same as the ``value_type`` of the input. - REDUCE_MAX (int): Reduce by computing the maximum value across time series for each - alignment period. This reducer is valid for ``DELTA`` and ``GAUGE`` - metrics with numeric values. The ``value_type`` of the output is the - same as the ``value_type`` of the input. - REDUCE_SUM (int): Reduce by computing the sum across time series for each alignment - period. This reducer is valid for ``DELTA`` and ``GAUGE`` metrics with - numeric and distribution values. The ``value_type`` of the output is the - same as the ``value_type`` of the input. 
- REDUCE_STDDEV (int): Reduce by computing the standard deviation across time series for - each alignment period. This reducer is valid for ``DELTA`` and ``GAUGE`` - metrics with numeric or distribution values. The ``value_type`` of the - output is ``DOUBLE``. - REDUCE_COUNT (int): Reduce by computing the number of data points across time series for - each alignment period. This reducer is valid for ``DELTA`` and ``GAUGE`` - metrics of numeric, Boolean, distribution, and string ``value_type``. - The ``value_type`` of the output is ``INT64``. - REDUCE_COUNT_TRUE (int): Reduce by computing the number of ``True``-valued data points across - time series for each alignment period. This reducer is valid for - ``DELTA`` and ``GAUGE`` metrics of Boolean ``value_type``. The - ``value_type`` of the output is ``INT64``. - REDUCE_COUNT_FALSE (int): Reduce by computing the number of ``False``-valued data points - across time series for each alignment period. This reducer is valid for - ``DELTA`` and ``GAUGE`` metrics of Boolean ``value_type``. The - ``value_type`` of the output is ``INT64``. - REDUCE_FRACTION_TRUE (int): Reduce by computing the ratio of the number of ``True``-valued data - points to the total number of data points for each alignment period. - This reducer is valid for ``DELTA`` and ``GAUGE`` metrics of Boolean - ``value_type``. The output value is in the range [0.0, 1.0] and has - ``value_type`` ``DOUBLE``. - REDUCE_PERCENTILE_99 (int): Reduce by computing the `99th - percentile `__ of data points - across time series for each alignment period. This reducer is valid for - ``GAUGE`` and ``DELTA`` metrics of numeric and distribution type. The - value of the output is ``DOUBLE``. - REDUCE_PERCENTILE_95 (int): Reduce by computing the `95th - percentile `__ of data points - across time series for each alignment period. This reducer is valid for - ``GAUGE`` and ``DELTA`` metrics of numeric and distribution type. The - value of the output is ``DOUBLE``. 
- REDUCE_PERCENTILE_50 (int): Reduce by computing the `50th - percentile `__ of data points - across time series for each alignment period. This reducer is valid for - ``GAUGE`` and ``DELTA`` metrics of numeric and distribution type. The - value of the output is ``DOUBLE``. - REDUCE_PERCENTILE_05 (int): Reduce by computing the `5th - percentile `__ of data points - across time series for each alignment period. This reducer is valid for - ``GAUGE`` and ``DELTA`` metrics of numeric and distribution type. The - value of the output is ``DOUBLE``. - """ - - REDUCE_NONE = 0 - REDUCE_MEAN = 1 - REDUCE_MIN = 2 - REDUCE_MAX = 3 - REDUCE_SUM = 4 - REDUCE_STDDEV = 5 - REDUCE_COUNT = 6 - REDUCE_COUNT_TRUE = 7 - REDUCE_COUNT_FALSE = 15 - REDUCE_FRACTION_TRUE = 8 - REDUCE_PERCENTILE_99 = 9 - REDUCE_PERCENTILE_95 = 10 - REDUCE_PERCENTILE_50 = 11 - REDUCE_PERCENTILE_05 = 12 - - -class AlertPolicy(object): - class ConditionCombinerType(enum.IntEnum): - """ - Operators for combining conditions. - - Attributes: - COMBINE_UNSPECIFIED (int): An unspecified combiner. - AND (int): Combine conditions using the logical ``AND`` operator. An incident - is created only if all the conditions are met simultaneously. This - combiner is satisfied if all conditions are met, even if they are met on - completely different resources. - OR (int): Combine conditions using the logical ``OR`` operator. An incident is - created if any of the listed conditions is met. - AND_WITH_MATCHING_RESOURCE (int): Combine conditions using logical ``AND`` operator, but unlike the - regular ``AND`` option, an incident is created only if all conditions - are met simultaneously on at least one resource. - """ - - COMBINE_UNSPECIFIED = 0 - AND = 1 - OR = 2 - AND_WITH_MATCHING_RESOURCE = 3 - - -class InternalChecker(object): - class State(enum.IntEnum): - """ - Operational states for an internal checker. - - Attributes: - UNSPECIFIED (int): An internal checker should never be in the unspecified state. 
- CREATING (int): The checker is being created, provisioned, and configured. A checker - in this state can be returned by ``ListInternalCheckers`` or - ``GetInternalChecker``, as well as by examining the `long running - Operation `__ - that created it. - RUNNING (int): The checker is running and available for use. A checker in this - state can be returned by ``ListInternalCheckers`` or - ``GetInternalChecker`` as well as by examining the `long running - Operation `__ - that created it. If a checker is being torn down, it is neither visible - nor usable, so there is no "deleting" or "down" state. - """ - - UNSPECIFIED = 0 - CREATING = 1 - RUNNING = 2 - - -class LabelDescriptor(object): - class ValueType(enum.IntEnum): - """ - Value types that can be used as label values. - - Attributes: - STRING (int): A variable-length string. This is the default. - BOOL (int): Boolean; true or false. - INT64 (int): A 64-bit signed integer. - """ - - STRING = 0 - BOOL = 1 - INT64 = 2 - - -class ListTimeSeriesRequest(object): - class TimeSeriesView(enum.IntEnum): - """ - Controls which fields are returned by ``ListTimeSeries``. - - Attributes: - FULL (int): Returns the identity of the metric(s), the time series, - and the time series data. - HEADERS (int): Returns the identity of the metric and the time series resource, - but not the time series data. - """ - - FULL = 0 - HEADERS = 1 - - -class MetricDescriptor(object): - class MetricKind(enum.IntEnum): - """ - The kind of measurement. It describes how the data is reported. - - Attributes: - METRIC_KIND_UNSPECIFIED (int): Do not use this default value. - GAUGE (int): An instantaneous measurement of a value. - DELTA (int): The change in a value during a time interval. - CUMULATIVE (int): A value accumulated over a time interval. 
Cumulative - measurements in a time series should have the same start time - and increasing end times, until an event resets the cumulative - value to zero and sets a new start time for the following - points. - """ - - METRIC_KIND_UNSPECIFIED = 0 - GAUGE = 1 - DELTA = 2 - CUMULATIVE = 3 - - class ValueType(enum.IntEnum): - """ - The value type of a metric. - - Attributes: - VALUE_TYPE_UNSPECIFIED (int): Do not use this default value. - BOOL (int): The value is a boolean. This value type can be used only if the - metric kind is ``GAUGE``. - INT64 (int): The value is a signed 64-bit integer. - DOUBLE (int): The value is a double precision floating point number. - STRING (int): The value is a text string. This value type can be used only if the - metric kind is ``GAUGE``. - DISTRIBUTION (int): The value is a ``Distribution``. - MONEY (int): The value is money. - """ - - VALUE_TYPE_UNSPECIFIED = 0 - BOOL = 1 - INT64 = 2 - DOUBLE = 3 - STRING = 4 - DISTRIBUTION = 5 - MONEY = 6 - - -class NotificationChannel(object): - class VerificationStatus(enum.IntEnum): - """ - Indicates whether the channel has been verified or not. It is - illegal to specify this field in a ``CreateNotificationChannel`` or an - ``UpdateNotificationChannel`` operation. - - Attributes: - VERIFICATION_STATUS_UNSPECIFIED (int): Sentinel value used to indicate that the state is unknown, omitted, or - is not applicable (as in the case of channels that neither support - nor require verification in order to function). - UNVERIFIED (int): The channel has yet to be verified and requires verification to function. - Note that this state also applies to the case where the verification - process has been initiated by sending a verification code but where - the verification code has not been submitted to complete the process. 
- VERIFIED (int): It has been proven that notifications can be received on this - notification channel and that someone on the project has access - to messages that are delivered to that channel. - """ - - VERIFICATION_STATUS_UNSPECIFIED = 0 - UNVERIFIED = 1 - VERIFIED = 2 - - -class ServiceLevelObjective(object): - class View(enum.IntEnum): - """ - ``ServiceLevelObjective.View`` determines what form of - ``ServiceLevelObjective`` is returned from ``GetServiceLevelObjective``, - ``ListServiceLevelObjectives``, and - ``ListServiceLevelObjectiveVersions`` RPCs. - - Attributes: - VIEW_UNSPECIFIED (int): Same as FULL. - FULL (int): Return the embedded ``ServiceLevelIndicator`` in the form in which - it was defined. If it was defined using a ``BasicSli``, return that - ``BasicSli``. - EXPLICIT (int): For ``ServiceLevelIndicator``\ s using ``BasicSli`` articulation, - instead return the ``ServiceLevelIndicator`` with its mode of - computation fully spelled out as a ``RequestBasedSli``. For - ``ServiceLevelIndicator``\ s using ``RequestBasedSli`` or - ``WindowsBasedSli``, return the ``ServiceLevelIndicator`` as it was - provided. - """ - - VIEW_UNSPECIFIED = 0 - FULL = 2 - EXPLICIT = 1 - - -class UptimeCheckConfig(object): - class HttpCheck(object): - class ContentType(enum.IntEnum): - """ - Header options corresponding to the Content-Type of the body in HTTP - requests. Note that a ``Content-Type`` header cannot be present in the - ``headers`` field if this field is specified. - - Attributes: - TYPE_UNSPECIFIED (int): No content type specified. If the request method is POST, an - unspecified content type results in a check creation rejection. - URL_ENCODED (int): ``body`` is in URL-encoded form. Equivalent to setting the - ``Content-Type`` to ``application/x-www-form-urlencoded`` in the HTTP - request. - """ - - TYPE_UNSPECIFIED = 0 - URL_ENCODED = 1 - - class RequestMethod(enum.IntEnum): - """ - The HTTP request method options. 
- - Attributes: - METHOD_UNSPECIFIED (int): No request method specified. - GET (int): GET request. - POST (int): POST request. - """ - - METHOD_UNSPECIFIED = 0 - GET = 1 - POST = 2 - - class ContentMatcher(object): - class ContentMatcherOption(enum.IntEnum): - """ - Options to perform content matching. - - Attributes: - CONTENT_MATCHER_OPTION_UNSPECIFIED (int): No content matcher type specified (maintained for backward - compatibility, but deprecated for future use). Treated as - ``CONTAINS_STRING``. - CONTAINS_STRING (int): Selects substring matching. The match succeeds if the output - contains the ``content`` string. This is the default value for checks - without a ``matcher`` option, or where the value of ``matcher`` is - ``CONTENT_MATCHER_OPTION_UNSPECIFIED``. - NOT_CONTAINS_STRING (int): Selects negation of substring matching. The match succeeds if the - output does *NOT* contain the ``content`` string. - MATCHES_REGEX (int): Selects regular-expression matching. The match succeeds of the - output matches the regular expression specified in the ``content`` - string. - NOT_MATCHES_REGEX (int): Selects negation of regular-expression matching. The match succeeds - if the output does *NOT* match the regular expression specified in the - ``content`` string. - """ - - CONTENT_MATCHER_OPTION_UNSPECIFIED = 0 - CONTAINS_STRING = 1 - NOT_CONTAINS_STRING = 2 - MATCHES_REGEX = 3 - NOT_MATCHES_REGEX = 4 diff --git a/google/cloud/monitoring_v3/gapic/group_service_client.py b/google/cloud/monitoring_v3/gapic/group_service_client.py deleted file mode 100644 index b7997312..00000000 --- a/google/cloud/monitoring_v3/gapic/group_service_client.py +++ /dev/null @@ -1,827 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.monitoring.v3 GroupService API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.page_iterator -import google.api_core.path_template -import google.api_core.protobuf_helpers -import grpc - -from google.cloud.monitoring_v3.gapic import enums -from google.cloud.monitoring_v3.gapic import group_service_client_config -from google.cloud.monitoring_v3.gapic.transports import group_service_grpc_transport -from google.cloud.monitoring_v3.proto import alert_pb2 -from google.cloud.monitoring_v3.proto import alert_service_pb2 -from google.cloud.monitoring_v3.proto import alert_service_pb2_grpc -from google.cloud.monitoring_v3.proto import common_pb2 -from google.cloud.monitoring_v3.proto import group_pb2 -from google.cloud.monitoring_v3.proto import group_service_pb2 -from google.cloud.monitoring_v3.proto import group_service_pb2_grpc -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-monitoring" -).version - - -class GroupServiceClient(object): - """ - The Group API lets you inspect and manage your - `groups <#google.monitoring.v3.Group>`__. 
- - A group is a named filter that is used to identify a collection of - monitored resources. Groups are typically used to mirror the physical - and/or logical topology of the environment. Because group membership is - computed dynamically, monitored resources that are started in the future - are automatically placed in matching groups. By using a group to name - monitored resources in, for example, an alert policy, the target of that - alert policy is updated automatically as monitored resources are added - and removed from the infrastructure. - """ - - SERVICE_ADDRESS = "monitoring.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.monitoring.v3.GroupService" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - GroupServiceClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def group_path(cls, project, group): - """Return a fully-qualified group string.""" - return google.api_core.path_template.expand( - "projects/{project}/groups/{group}", project=project, group=group - ) - - @classmethod - def project_path(cls, project): - """Return a fully-qualified project string.""" - return google.api_core.path_template.expand( - "projects/{project}", project=project - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.GroupServiceGrpcTransport, - Callable[[~.Credentials, type], ~.GroupServiceGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = group_service_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=group_service_grpc_transport.GroupServiceGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." 
- ) - self.transport = transport - else: - self.transport = group_service_grpc_transport.GroupServiceGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def list_groups( - self, - name, - children_of_group=None, - ancestors_of_group=None, - descendants_of_group=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists the existing groups. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.GroupServiceClient() - >>> - >>> name = client.project_path('[PROJECT]') - >>> - >>> # Iterate over all results - >>> for element in client.list_groups(name): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_groups(name).pages: - ... for element in page: - ... # process element - ... pass - - Args: - name (str): Required. The project whose groups are to be listed. The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER] - children_of_group (str): A group name. 
The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - - Returns groups whose ``parent_name`` field contains the group name. If - no groups have this parent, the results are empty. - ancestors_of_group (str): A group name. The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - - Returns groups that are ancestors of the specified group. The groups are - returned in order, starting with the immediate parent and ending with - the most distant ancestor. If the specified group has no immediate - parent, the results are empty. - descendants_of_group (str): A group name. The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - - Returns the descendants of the specified group. This is a superset of - the results returned by the ``children_of_group`` filter, and includes - children-of-children, and so forth. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.monitoring_v3.types.Group` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "list_groups" not in self._inner_api_calls: - self._inner_api_calls[ - "list_groups" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_groups, - default_retry=self._method_configs["ListGroups"].retry, - default_timeout=self._method_configs["ListGroups"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - children_of_group=children_of_group, - ancestors_of_group=ancestors_of_group, - descendants_of_group=descendants_of_group, - ) - - request = group_service_pb2.ListGroupsRequest( - name=name, - children_of_group=children_of_group, - ancestors_of_group=ancestors_of_group, - descendants_of_group=descendants_of_group, - page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_groups"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="group", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_group( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a single group. 
- - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.GroupServiceClient() - >>> - >>> name = client.group_path('[PROJECT]', '[GROUP]') - >>> - >>> response = client.get_group(name) - - Args: - name (str): Required. The group to retrieve. The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.Group` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "get_group" not in self._inner_api_calls: - self._inner_api_calls[ - "get_group" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_group, - default_retry=self._method_configs["GetGroup"].retry, - default_timeout=self._method_configs["GetGroup"].timeout, - client_info=self._client_info, - ) - - request = group_service_pb2.GetGroupRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_group"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_group( - self, - name, - group, - validate_only=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new group. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.GroupServiceClient() - >>> - >>> name = client.project_path('[PROJECT]') - >>> - >>> # TODO: Initialize `group`: - >>> group = {} - >>> - >>> response = client.create_group(name, group) - - Args: - name (str): Required. The project in which to create the group. The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER] - group (Union[dict, ~google.cloud.monitoring_v3.types.Group]): Required. A group definition. It is an error to define the ``name`` - field because the system assigns the name. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.Group` - validate_only (bool): If true, validate this request but do not create the group. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.Group` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "create_group" not in self._inner_api_calls: - self._inner_api_calls[ - "create_group" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_group, - default_retry=self._method_configs["CreateGroup"].retry, - default_timeout=self._method_configs["CreateGroup"].timeout, - client_info=self._client_info, - ) - - request = group_service_pb2.CreateGroupRequest( - name=name, group=group, validate_only=validate_only - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_group"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_group( - self, - group, - validate_only=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates an existing group. You can change any group attributes - except ``name``. 
- - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.GroupServiceClient() - >>> - >>> # TODO: Initialize `group`: - >>> group = {} - >>> - >>> response = client.update_group(group) - - Args: - group (Union[dict, ~google.cloud.monitoring_v3.types.Group]): Required. The new definition of the group. All fields of the - existing group, excepting ``name``, are replaced with the corresponding - fields of this group. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.Group` - validate_only (bool): If true, validate this request but do not update the existing group. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.Group` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "update_group" not in self._inner_api_calls: - self._inner_api_calls[ - "update_group" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_group, - default_retry=self._method_configs["UpdateGroup"].retry, - default_timeout=self._method_configs["UpdateGroup"].timeout, - client_info=self._client_info, - ) - - request = group_service_pb2.UpdateGroupRequest( - group=group, validate_only=validate_only - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("group.name", group.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_group"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_group( - self, - name, - recursive=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes an existing group. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.GroupServiceClient() - >>> - >>> name = client.group_path('[PROJECT]', '[GROUP]') - >>> - >>> client.delete_group(name) - - Args: - name (str): Required. The group to delete. The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - recursive (bool): If this field is true, then the request means to delete a group with all - its descendants. Otherwise, the request means to delete a group only when - it has no descendants. The default value is false. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. 
- metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "delete_group" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_group" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_group, - default_retry=self._method_configs["DeleteGroup"].retry, - default_timeout=self._method_configs["DeleteGroup"].timeout, - client_info=self._client_info, - ) - - request = group_service_pb2.DeleteGroupRequest(name=name, recursive=recursive) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_group"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_group_members( - self, - name, - page_size=None, - filter_=None, - interval=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists the monitored resources that are members of a group. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.GroupServiceClient() - >>> - >>> name = client.group_path('[PROJECT]', '[GROUP]') - >>> - >>> # Iterate over all results - >>> for element in client.list_group_members(name): - ... # process element - ... 
pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_group_members(name).pages: - ... for element in page: - ... # process element - ... pass - - Args: - name (str): Required. The group whose members are listed. The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - filter_ (str): An optional `list - filter `__ - describing the members to be returned. The filter may reference the - type, labels, and metadata of monitored resources that comprise the - group. For example, to return only resources representing Compute Engine - VM instances, use this filter: - - :: - - `resource.type = "gce_instance"` - interval (Union[dict, ~google.cloud.monitoring_v3.types.TimeInterval]): An optional time interval for which results should be returned. Only - members that were part of the group during the specified interval are - included in the response. If no interval is provided then the group - membership over the last minute is returned. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.TimeInterval` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. 
- - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.monitoring_v3.types.MonitoredResource` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "list_group_members" not in self._inner_api_calls: - self._inner_api_calls[ - "list_group_members" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_group_members, - default_retry=self._method_configs["ListGroupMembers"].retry, - default_timeout=self._method_configs["ListGroupMembers"].timeout, - client_info=self._client_info, - ) - - request = group_service_pb2.ListGroupMembersRequest( - name=name, page_size=page_size, filter=filter_, interval=interval - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_group_members"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="members", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator diff --git a/google/cloud/monitoring_v3/gapic/group_service_client_config.py b/google/cloud/monitoring_v3/gapic/group_service_client_config.py deleted file mode 100644 index 786ec6ba..00000000 --- 
a/google/cloud/monitoring_v3/gapic/group_service_client_config.py +++ /dev/null @@ -1,53 +0,0 @@ -config = { - "interfaces": { - "google.monitoring.v3.GroupService": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } - }, - "methods": { - "ListGroups": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetGroup": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "CreateGroup": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "UpdateGroup": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "DeleteGroup": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListGroupMembers": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git a/google/cloud/monitoring_v3/gapic/metric_service_client.py b/google/cloud/monitoring_v3/gapic/metric_service_client.py deleted file mode 100644 index 991beaf5..00000000 --- a/google/cloud/monitoring_v3/gapic/metric_service_client.py +++ /dev/null @@ -1,1061 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.monitoring.v3 MetricService API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.page_iterator -import google.api_core.path_template -import grpc - -from google.api import metric_pb2 as api_metric_pb2 -from google.api import monitored_resource_pb2 -from google.cloud.monitoring_v3.gapic import enums -from google.cloud.monitoring_v3.gapic import metric_service_client_config -from google.cloud.monitoring_v3.gapic.transports import metric_service_grpc_transport -from google.cloud.monitoring_v3.proto import alert_pb2 -from google.cloud.monitoring_v3.proto import alert_service_pb2 -from google.cloud.monitoring_v3.proto import alert_service_pb2_grpc -from google.cloud.monitoring_v3.proto import common_pb2 -from google.cloud.monitoring_v3.proto import group_pb2 -from google.cloud.monitoring_v3.proto import group_service_pb2 -from google.cloud.monitoring_v3.proto import group_service_pb2_grpc -from google.cloud.monitoring_v3.proto import metric_pb2 as proto_metric_pb2 -from google.cloud.monitoring_v3.proto import metric_service_pb2 -from google.cloud.monitoring_v3.proto import metric_service_pb2_grpc -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = 
pkg_resources.get_distribution( - "google-cloud-monitoring" -).version - - -class MetricServiceClient(object): - """ - Manages metric descriptors, monitored resource descriptors, and - time series data. - """ - - SERVICE_ADDRESS = "monitoring.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.monitoring.v3.MetricService" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MetricServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def metric_descriptor_path(cls, project, metric_descriptor): - """Return a fully-qualified metric_descriptor string.""" - return google.api_core.path_template.expand( - "projects/{project}/metricDescriptors/{metric_descriptor=**}", - project=project, - metric_descriptor=metric_descriptor, - ) - - @classmethod - def monitored_resource_descriptor_path(cls, project, monitored_resource_descriptor): - """Return a fully-qualified monitored_resource_descriptor string.""" - return google.api_core.path_template.expand( - "projects/{project}/monitoredResourceDescriptors/{monitored_resource_descriptor}", - project=project, - monitored_resource_descriptor=monitored_resource_descriptor, - ) - - @classmethod - def project_path(cls, project): - """Return a fully-qualified project string.""" - return google.api_core.path_template.expand( - 
"projects/{project}", project=project - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.MetricServiceGrpcTransport, - Callable[[~.Credentials, type], ~.MetricServiceGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. 
- if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = metric_service_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=metric_service_grpc_transport.MetricServiceGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = metric_service_grpc_transport.MetricServiceGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] - ) - - # Save a dictionary of cached API call functions. 
- # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def list_monitored_resource_descriptors( - self, - name, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists monitored resource descriptors that match a filter. This method does not require a Workspace. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.MetricServiceClient() - >>> - >>> name = client.project_path('[PROJECT]') - >>> - >>> # Iterate over all results - >>> for element in client.list_monitored_resource_descriptors(name): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_monitored_resource_descriptors(name).pages: - ... for element in page: - ... # process element - ... pass - - Args: - name (str): Required. The project on which to execute the request. The format - is: - - :: - - projects/[PROJECT_ID_OR_NUMBER] - filter_ (str): An optional - `filter `__ - describing the descriptors to be returned. The filter can reference the - descriptor's type and labels. For example, the following filter returns - only Google Compute Engine descriptors that have an ``id`` label: - - :: - - resource.type = starts_with("gce_") AND resource.label:id - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.monitoring_v3.types.MonitoredResourceDescriptor` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "list_monitored_resource_descriptors" not in self._inner_api_calls: - self._inner_api_calls[ - "list_monitored_resource_descriptors" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_monitored_resource_descriptors, - default_retry=self._method_configs[ - "ListMonitoredResourceDescriptors" - ].retry, - default_timeout=self._method_configs[ - "ListMonitoredResourceDescriptors" - ].timeout, - client_info=self._client_info, - ) - - request = metric_service_pb2.ListMonitoredResourceDescriptorsRequest( - name=name, filter=filter_, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_monitored_resource_descriptors"], - 
retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="resource_descriptors", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_monitored_resource_descriptor( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a single monitored resource descriptor. This method does not require a Workspace. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.MetricServiceClient() - >>> - >>> name = client.monitored_resource_descriptor_path('[PROJECT]', '[MONITORED_RESOURCE_DESCRIPTOR]') - >>> - >>> response = client.get_monitored_resource_descriptor(name) - - Args: - name (str): Required. The monitored resource descriptor to get. The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE] - - The ``[RESOURCE_TYPE]`` is a predefined type, such as - ``cloudsql_database``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.MonitoredResourceDescriptor` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. 
- """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "get_monitored_resource_descriptor" not in self._inner_api_calls: - self._inner_api_calls[ - "get_monitored_resource_descriptor" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_monitored_resource_descriptor, - default_retry=self._method_configs[ - "GetMonitoredResourceDescriptor" - ].retry, - default_timeout=self._method_configs[ - "GetMonitoredResourceDescriptor" - ].timeout, - client_info=self._client_info, - ) - - request = metric_service_pb2.GetMonitoredResourceDescriptorRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_monitored_resource_descriptor"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_metric_descriptors( - self, - name, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists metric descriptors that match a filter. This method does not require a Workspace. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.MetricServiceClient() - >>> - >>> name = client.project_path('[PROJECT]') - >>> - >>> # Iterate over all results - >>> for element in client.list_metric_descriptors(name): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_metric_descriptors(name).pages: - ... for element in page: - ... # process element - ... pass - - Args: - name (str): Required. The project on which to execute the request. 
The format - is: - - :: - - projects/[PROJECT_ID_OR_NUMBER] - filter_ (str): If this field is empty, all custom and system-defined metric - descriptors are returned. Otherwise, the - `filter `__ - specifies which metric descriptors are to be returned. For example, the - following filter matches all `custom - metrics `__: - - :: - - metric.type = starts_with("custom.googleapis.com/") - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.monitoring_v3.types.MetricDescriptor` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "list_metric_descriptors" not in self._inner_api_calls: - self._inner_api_calls[ - "list_metric_descriptors" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_metric_descriptors, - default_retry=self._method_configs["ListMetricDescriptors"].retry, - default_timeout=self._method_configs["ListMetricDescriptors"].timeout, - client_info=self._client_info, - ) - - request = metric_service_pb2.ListMetricDescriptorsRequest( - name=name, filter=filter_, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_metric_descriptors"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="metric_descriptors", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_metric_descriptor( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a single metric descriptor. This method does not require a Workspace. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.MetricServiceClient() - >>> - >>> name = client.metric_descriptor_path('[PROJECT]', '[METRIC_DESCRIPTOR]') - >>> - >>> response = client.get_metric_descriptor(name) - - Args: - name (str): Required. The metric descriptor on which to execute the request. The - format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] - - An example value of ``[METRIC_ID]`` is - ``"compute.googleapis.com/instance/disk/read_bytes_count"``. 
- retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.MetricDescriptor` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "get_metric_descriptor" not in self._inner_api_calls: - self._inner_api_calls[ - "get_metric_descriptor" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_metric_descriptor, - default_retry=self._method_configs["GetMetricDescriptor"].retry, - default_timeout=self._method_configs["GetMetricDescriptor"].timeout, - client_info=self._client_info, - ) - - request = metric_service_pb2.GetMetricDescriptorRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_metric_descriptor"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_metric_descriptor( - self, - name, - metric_descriptor, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - 
""" - Creates a new metric descriptor. User-created metric descriptors - define `custom - metrics `__. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.MetricServiceClient() - >>> - >>> name = client.project_path('[PROJECT]') - >>> - >>> # TODO: Initialize `metric_descriptor`: - >>> metric_descriptor = {} - >>> - >>> response = client.create_metric_descriptor(name, metric_descriptor) - - Args: - name (str): Required. The project on which to execute the request. The format - is: - - :: - - projects/[PROJECT_ID_OR_NUMBER] - metric_descriptor (Union[dict, ~google.cloud.monitoring_v3.types.MetricDescriptor]): Required. The new `custom - metric `__ - descriptor. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.MetricDescriptor` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.MetricDescriptor` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "create_metric_descriptor" not in self._inner_api_calls: - self._inner_api_calls[ - "create_metric_descriptor" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_metric_descriptor, - default_retry=self._method_configs["CreateMetricDescriptor"].retry, - default_timeout=self._method_configs["CreateMetricDescriptor"].timeout, - client_info=self._client_info, - ) - - request = metric_service_pb2.CreateMetricDescriptorRequest( - name=name, metric_descriptor=metric_descriptor - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_metric_descriptor"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_metric_descriptor( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a metric descriptor. Only user-created `custom - metrics `__ can be - deleted. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.MetricServiceClient() - >>> - >>> name = client.metric_descriptor_path('[PROJECT]', '[METRIC_DESCRIPTOR]') - >>> - >>> client.delete_metric_descriptor(name) - - Args: - name (str): Required. The metric descriptor on which to execute the request. The - format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] - - An example of ``[METRIC_ID]`` is: - ``"custom.googleapis.com/my_test_metric"``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. 
Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "delete_metric_descriptor" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_metric_descriptor" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_metric_descriptor, - default_retry=self._method_configs["DeleteMetricDescriptor"].retry, - default_timeout=self._method_configs["DeleteMetricDescriptor"].timeout, - client_info=self._client_info, - ) - - request = metric_service_pb2.DeleteMetricDescriptorRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_metric_descriptor"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_time_series( - self, - name, - filter_, - interval, - view, - aggregation=None, - order_by=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists time series that match a filter. This method does not require a Workspace. 
- - Example: - >>> from google.cloud import monitoring_v3 - >>> from google.cloud.monitoring_v3 import enums - >>> - >>> client = monitoring_v3.MetricServiceClient() - >>> - >>> name = client.project_path('[PROJECT]') - >>> - >>> # TODO: Initialize `filter_`: - >>> filter_ = '' - >>> - >>> # TODO: Initialize `interval`: - >>> interval = {} - >>> - >>> # TODO: Initialize `view`: - >>> view = enums.ListTimeSeriesRequest.TimeSeriesView.FULL - >>> - >>> # Iterate over all results - >>> for element in client.list_time_series(name, filter_, interval, view): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_time_series(name, filter_, interval, view).pages: - ... for element in page: - ... # process element - ... pass - - Args: - name (str): Required. The project on which to execute the request. The format - is: - - :: - - projects/[PROJECT_ID_OR_NUMBER] - filter_ (str): Required. A `monitoring - filter `__ that - specifies which time series should be returned. The filter must specify - a single metric type, and can additionally specify metric labels and - other information. For example: - - :: - - metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND - metric.labels.instance_name = "my-instance-name" - interval (Union[dict, ~google.cloud.monitoring_v3.types.TimeInterval]): Required. The time interval for which results should be returned. Only time series - that contain data points in the specified interval are included - in the response. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.TimeInterval` - view (~google.cloud.monitoring_v3.types.TimeSeriesView): Required. Specifies which information is returned about the time series. 
- aggregation (Union[dict, ~google.cloud.monitoring_v3.types.Aggregation]): Specifies the alignment of data points in individual time series as - well as how to combine the retrieved time series across specified - labels. - - By default (if no ``aggregation`` is explicitly specified), the raw time - series data is returned. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.Aggregation` - order_by (str): Unsupported: must be left blank. The points in each time series are - currently returned in reverse time order (most recent to oldest). - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.monitoring_v3.types.TimeSeries` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. 
- """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "list_time_series" not in self._inner_api_calls: - self._inner_api_calls[ - "list_time_series" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_time_series, - default_retry=self._method_configs["ListTimeSeries"].retry, - default_timeout=self._method_configs["ListTimeSeries"].timeout, - client_info=self._client_info, - ) - - request = metric_service_pb2.ListTimeSeriesRequest( - name=name, - filter=filter_, - interval=interval, - view=view, - aggregation=aggregation, - order_by=order_by, - page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_time_series"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="time_series", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def create_time_series( - self, - name, - time_series, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates or adds data to one or more time series. - The response is empty if all time series in the request were written. - If any time series could not be written, a corresponding failure message is - included in the error response. 
- - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.MetricServiceClient() - >>> - >>> name = client.project_path('[PROJECT]') - >>> - >>> # TODO: Initialize `time_series`: - >>> time_series = [] - >>> - >>> client.create_time_series(name, time_series) - - Args: - name (str): Required. The project on which to execute the request. The format - is: - - :: - - projects/[PROJECT_ID_OR_NUMBER] - time_series (list[Union[dict, ~google.cloud.monitoring_v3.types.TimeSeries]]): Required. The new data to be added to a list of time series. Adds at - most one data point to each of several time series. The new data point - must be more recent than any other point in its time series. Each - ``TimeSeries`` value must fully specify a unique time series by - supplying all label values for the metric and the monitored resource. - - The maximum number of ``TimeSeries`` objects per ``Create`` request is - 200. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.TimeSeries` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "create_time_series" not in self._inner_api_calls: - self._inner_api_calls[ - "create_time_series" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_time_series, - default_retry=self._method_configs["CreateTimeSeries"].retry, - default_timeout=self._method_configs["CreateTimeSeries"].timeout, - client_info=self._client_info, - ) - - request = metric_service_pb2.CreateTimeSeriesRequest( - name=name, time_series=time_series - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["create_time_series"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/monitoring_v3/gapic/metric_service_client_config.py b/google/cloud/monitoring_v3/gapic/metric_service_client_config.py deleted file mode 100644 index 57315f7c..00000000 --- a/google/cloud/monitoring_v3/gapic/metric_service_client_config.py +++ /dev/null @@ -1,63 +0,0 @@ -config = { - "interfaces": { - "google.monitoring.v3.MetricService": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 30000, - "rpc_timeout_multiplier": 1.3, - "max_rpc_timeout_millis": 90000, - "total_timeout_millis": 600000, - } - }, - "methods": { - "ListMonitoredResourceDescriptors": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetMonitoredResourceDescriptor": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListMetricDescriptors": { - "timeout_millis": 60000, - "retry_codes_name": 
"idempotent", - "retry_params_name": "default", - }, - "GetMetricDescriptor": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "CreateMetricDescriptor": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "DeleteMetricDescriptor": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListTimeSeries": { - "timeout_millis": 90000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "CreateTimeSeries": { - "timeout_millis": 12000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git a/google/cloud/monitoring_v3/gapic/notification_channel_service_client.py b/google/cloud/monitoring_v3/gapic/notification_channel_service_client.py deleted file mode 100644 index dbab772d..00000000 --- a/google/cloud/monitoring_v3/gapic/notification_channel_service_client.py +++ /dev/null @@ -1,1195 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.monitoring.v3 NotificationChannelService API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.page_iterator -import google.api_core.path_template -import grpc - -from google.api import metric_pb2 as api_metric_pb2 -from google.api import monitored_resource_pb2 -from google.cloud.monitoring_v3.gapic import enums -from google.cloud.monitoring_v3.gapic import notification_channel_service_client_config -from google.cloud.monitoring_v3.gapic.transports import ( - notification_channel_service_grpc_transport, -) -from google.cloud.monitoring_v3.proto import alert_pb2 -from google.cloud.monitoring_v3.proto import alert_service_pb2 -from google.cloud.monitoring_v3.proto import alert_service_pb2_grpc -from google.cloud.monitoring_v3.proto import common_pb2 -from google.cloud.monitoring_v3.proto import group_pb2 -from google.cloud.monitoring_v3.proto import group_service_pb2 -from google.cloud.monitoring_v3.proto import group_service_pb2_grpc -from google.cloud.monitoring_v3.proto import metric_pb2 as proto_metric_pb2 -from google.cloud.monitoring_v3.proto import metric_service_pb2 -from google.cloud.monitoring_v3.proto import metric_service_pb2_grpc -from google.cloud.monitoring_v3.proto import notification_pb2 -from google.cloud.monitoring_v3.proto import notification_service_pb2 -from google.cloud.monitoring_v3.proto import notification_service_pb2_grpc -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -from google.protobuf import timestamp_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-monitoring" -).version - - -class 
NotificationChannelServiceClient(object): - """ - The Notification Channel API provides access to configuration that - controls how messages related to incidents are sent. - """ - - SERVICE_ADDRESS = "monitoring.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.monitoring.v3.NotificationChannelService" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - NotificationChannelServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def notification_channel_path(cls, project, notification_channel): - """Return a fully-qualified notification_channel string.""" - return google.api_core.path_template.expand( - "projects/{project}/notificationChannels/{notification_channel}", - project=project, - notification_channel=notification_channel, - ) - - @classmethod - def notification_channel_descriptor_path(cls, project, channel_descriptor): - """Return a fully-qualified notification_channel_descriptor string.""" - return google.api_core.path_template.expand( - "projects/{project}/notificationChannelDescriptors/{channel_descriptor}", - project=project, - channel_descriptor=channel_descriptor, - ) - - @classmethod - def project_path(cls, project): - """Return a fully-qualified project string.""" - return google.api_core.path_template.expand( - "projects/{project}", 
project=project - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.NotificationChannelServiceGrpcTransport, - Callable[[~.Credentials, type], ~.NotificationChannelServiceGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. 
- if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = notification_channel_service_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=notification_channel_service_grpc_transport.NotificationChannelServiceGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = notification_channel_service_grpc_transport.NotificationChannelServiceGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] - ) - - # Save a dictionary of cached API call functions. 
- # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def list_notification_channel_descriptors( - self, - name, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists the descriptors for supported channel types. The use of descriptors - makes it possible for new channel types to be dynamically added. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.NotificationChannelServiceClient() - >>> - >>> name = client.project_path('[PROJECT]') - >>> - >>> # Iterate over all results - >>> for element in client.list_notification_channel_descriptors(name): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_notification_channel_descriptors(name).pages: - ... for element in page: - ... # process element - ... pass - - Args: - name (str): Required. The REST resource name of the parent from which to - retrieve the notification channel descriptors. The expected syntax is: - - :: - - projects/[PROJECT_ID_OR_NUMBER] - - Note that this names the parent container in which to look for the - descriptors; to retrieve a single descriptor by name, use the - ``GetNotificationChannelDescriptor`` operation, instead. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.monitoring_v3.types.NotificationChannelDescriptor` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "list_notification_channel_descriptors" not in self._inner_api_calls: - self._inner_api_calls[ - "list_notification_channel_descriptors" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_notification_channel_descriptors, - default_retry=self._method_configs[ - "ListNotificationChannelDescriptors" - ].retry, - default_timeout=self._method_configs[ - "ListNotificationChannelDescriptors" - ].timeout, - client_info=self._client_info, - ) - - request = notification_service_pb2.ListNotificationChannelDescriptorsRequest( - name=name, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_notification_channel_descriptors"], - 
retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="channel_descriptors", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_notification_channel_descriptor( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a single channel descriptor. The descriptor indicates which fields - are expected / permitted for a notification channel of the given type. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.NotificationChannelServiceClient() - >>> - >>> name = client.notification_channel_descriptor_path('[PROJECT]', '[CHANNEL_DESCRIPTOR]') - >>> - >>> response = client.get_notification_channel_descriptor(name) - - Args: - name (str): Required. The channel type for which to execute the request. The - format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE] - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.NotificationChannelDescriptor` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. 
- """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "get_notification_channel_descriptor" not in self._inner_api_calls: - self._inner_api_calls[ - "get_notification_channel_descriptor" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_notification_channel_descriptor, - default_retry=self._method_configs[ - "GetNotificationChannelDescriptor" - ].retry, - default_timeout=self._method_configs[ - "GetNotificationChannelDescriptor" - ].timeout, - client_info=self._client_info, - ) - - request = notification_service_pb2.GetNotificationChannelDescriptorRequest( - name=name - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_notification_channel_descriptor"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_notification_channels( - self, - name, - filter_=None, - order_by=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists the notification channels that have been created for the project. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.NotificationChannelServiceClient() - >>> - >>> name = client.project_path('[PROJECT]') - >>> - >>> # Iterate over all results - >>> for element in client.list_notification_channels(name): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_notification_channels(name).pages: - ... for element in page: - ... # process element - ... pass - - Args: - name (str): Required. 
The project on which to execute the request. The format - is: - - :: - - projects/[PROJECT_ID_OR_NUMBER] - - This names the container in which to look for the notification channels; - it does not name a specific channel. To query a specific channel by REST - resource name, use the ``GetNotificationChannel`` operation. - filter_ (str): If provided, this field specifies the criteria that must be met by - notification channels to be included in the response. - - For more details, see `sorting and - filtering `__. - order_by (str): A comma-separated list of fields by which to sort the result. - Supports the same set of fields as in ``filter``. Entries can be - prefixed with a minus sign to sort in descending rather than ascending - order. - - For more details, see `sorting and - filtering `__. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.monitoring_v3.types.NotificationChannel` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "list_notification_channels" not in self._inner_api_calls: - self._inner_api_calls[ - "list_notification_channels" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_notification_channels, - default_retry=self._method_configs["ListNotificationChannels"].retry, - default_timeout=self._method_configs[ - "ListNotificationChannels" - ].timeout, - client_info=self._client_info, - ) - - request = notification_service_pb2.ListNotificationChannelsRequest( - name=name, filter=filter_, order_by=order_by, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_notification_channels"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="notification_channels", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_notification_channel( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a single notification channel. The channel includes the relevant - configuration details with which the channel was created. 
However, the - response may truncate or omit passwords, API keys, or other private key - matter and thus the response may not be 100% identical to the information - that was supplied in the call to the create method. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.NotificationChannelServiceClient() - >>> - >>> name = client.notification_channel_path('[PROJECT]', '[NOTIFICATION_CHANNEL]') - >>> - >>> response = client.get_notification_channel(name) - - Args: - name (str): Required. The channel for which to execute the request. The format - is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.NotificationChannel` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "get_notification_channel" not in self._inner_api_calls: - self._inner_api_calls[ - "get_notification_channel" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_notification_channel, - default_retry=self._method_configs["GetNotificationChannel"].retry, - default_timeout=self._method_configs["GetNotificationChannel"].timeout, - client_info=self._client_info, - ) - - request = notification_service_pb2.GetNotificationChannelRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_notification_channel"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_notification_channel( - self, - name, - notification_channel, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new notification channel, representing a single notification - endpoint such as an email address, SMS number, or PagerDuty service. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.NotificationChannelServiceClient() - >>> - >>> name = client.project_path('[PROJECT]') - >>> - >>> # TODO: Initialize `notification_channel`: - >>> notification_channel = {} - >>> - >>> response = client.create_notification_channel(name, notification_channel) - - Args: - name (str): Required. The project on which to execute the request. The format - is: - - :: - - projects/[PROJECT_ID_OR_NUMBER] - - This names the container into which the channel will be written, this - does not name the newly created channel. 
The resulting channel's name - will have a normalized version of this field as a prefix, but will add - ``/notificationChannels/[CHANNEL_ID]`` to identify the channel. - notification_channel (Union[dict, ~google.cloud.monitoring_v3.types.NotificationChannel]): Required. The definition of the ``NotificationChannel`` to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.NotificationChannel` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.NotificationChannel` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "create_notification_channel" not in self._inner_api_calls: - self._inner_api_calls[ - "create_notification_channel" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_notification_channel, - default_retry=self._method_configs["CreateNotificationChannel"].retry, - default_timeout=self._method_configs[ - "CreateNotificationChannel" - ].timeout, - client_info=self._client_info, - ) - - request = notification_service_pb2.CreateNotificationChannelRequest( - name=name, notification_channel=notification_channel - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_notification_channel"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_notification_channel( - self, - notification_channel, - update_mask=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a notification channel. Fields not specified in the field mask - remain unchanged. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.NotificationChannelServiceClient() - >>> - >>> # TODO: Initialize `notification_channel`: - >>> notification_channel = {} - >>> - >>> response = client.update_notification_channel(notification_channel) - - Args: - notification_channel (Union[dict, ~google.cloud.monitoring_v3.types.NotificationChannel]): Required. A description of the changes to be applied to the - specified notification channel. The description must provide a - definition for fields to be updated; the names of these fields should - also be included in the ``update_mask``. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.NotificationChannel` - update_mask (Union[dict, ~google.cloud.monitoring_v3.types.FieldMask]): The fields to update. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.NotificationChannel` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "update_notification_channel" not in self._inner_api_calls: - self._inner_api_calls[ - "update_notification_channel" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_notification_channel, - default_retry=self._method_configs["UpdateNotificationChannel"].retry, - default_timeout=self._method_configs[ - "UpdateNotificationChannel" - ].timeout, - client_info=self._client_info, - ) - - request = notification_service_pb2.UpdateNotificationChannelRequest( - notification_channel=notification_channel, update_mask=update_mask - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("notification_channel.name", notification_channel.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_notification_channel"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_notification_channel( - self, - name, - force=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a notification channel. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.NotificationChannelServiceClient() - >>> - >>> name = client.notification_channel_path('[PROJECT]', '[NOTIFICATION_CHANNEL]') - >>> - >>> client.delete_notification_channel(name) - - Args: - name (str): Required. The channel for which to execute the request. The format - is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] - force (bool): If true, the notification channel will be deleted regardless of its - use in alert policies (the policies will be updated to remove the - channel). If false, channels that are still referenced by an existing - alerting policy will fail to be deleted in a delete operation. 
- retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "delete_notification_channel" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_notification_channel" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_notification_channel, - default_retry=self._method_configs["DeleteNotificationChannel"].retry, - default_timeout=self._method_configs[ - "DeleteNotificationChannel" - ].timeout, - client_info=self._client_info, - ) - - request = notification_service_pb2.DeleteNotificationChannelRequest( - name=name, force=force - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_notification_channel"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def send_notification_channel_verification_code( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Causes a 
verification code to be delivered to the channel. The code - can then be supplied in ``VerifyNotificationChannel`` to verify the - channel. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.NotificationChannelServiceClient() - >>> - >>> name = client.notification_channel_path('[PROJECT]', '[NOTIFICATION_CHANNEL]') - >>> - >>> client.send_notification_channel_verification_code(name) - - Args: - name (str): Required. The notification channel to which to send a verification code. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "send_notification_channel_verification_code" not in self._inner_api_calls: - self._inner_api_calls[ - "send_notification_channel_verification_code" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.send_notification_channel_verification_code, - default_retry=self._method_configs[ - "SendNotificationChannelVerificationCode" - ].retry, - default_timeout=self._method_configs[ - "SendNotificationChannelVerificationCode" - ].timeout, - client_info=self._client_info, - ) - - request = notification_service_pb2.SendNotificationChannelVerificationCodeRequest( - name=name - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["send_notification_channel_verification_code"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_notification_channel_verification_code( - self, - name, - expire_time=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Requests a verification code for an already verified channel that can then - be used in a call to VerifyNotificationChannel() on a different channel - with an equivalent identity in the same or in a different project. This - makes it possible to copy a channel between projects without requiring - manual reverification of the channel. If the channel is not in the - verified state, this method will fail (in other words, this may only be - used if the SendNotificationChannelVerificationCode and - VerifyNotificationChannel paths have already been used to put the given - channel into the verified state). 
- - There is no guarantee that the verification codes returned by this method - will be of a similar structure or form as the ones that are delivered - to the channel via SendNotificationChannelVerificationCode; while - VerifyNotificationChannel() will recognize both the codes delivered via - SendNotificationChannelVerificationCode() and returned from - GetNotificationChannelVerificationCode(), it is typically the case that - the verification codes delivered via - SendNotificationChannelVerificationCode() will be shorter and also - have a shorter expiration (e.g. codes such as "G-123456") whereas - GetVerificationCode() will typically return a much longer, websafe base - 64 encoded string that has a longer expiration time. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.NotificationChannelServiceClient() - >>> - >>> name = client.notification_channel_path('[PROJECT]', '[NOTIFICATION_CHANNEL]') - >>> - >>> response = client.get_notification_channel_verification_code(name) - - Args: - name (str): Required. The notification channel for which a verification code is to be generated - and retrieved. This must name a channel that is already verified; if - the specified channel is not verified, the request will fail. - expire_time (Union[dict, ~google.cloud.monitoring_v3.types.Timestamp]): The desired expiration time. If specified, the API will guarantee that - the returned code will not be valid after the specified timestamp; - however, the API cannot guarantee that the returned code will be - valid for at least as long as the requested time (the API puts an upper - bound on the amount of time for which a code may be valid). If omitted, - a default expiration will be used, which may be less than the max - permissible expiration (so specifying an expiration may extend the - code's lifetime over omitting an expiration, even though the API does - impose an upper limit on the maximum expiration that is permitted). 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.Timestamp` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.GetNotificationChannelVerificationCodeResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "get_notification_channel_verification_code" not in self._inner_api_calls: - self._inner_api_calls[ - "get_notification_channel_verification_code" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_notification_channel_verification_code, - default_retry=self._method_configs[ - "GetNotificationChannelVerificationCode" - ].retry, - default_timeout=self._method_configs[ - "GetNotificationChannelVerificationCode" - ].timeout, - client_info=self._client_info, - ) - - request = notification_service_pb2.GetNotificationChannelVerificationCodeRequest( - name=name, expire_time=expire_time - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_notification_channel_verification_code"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def verify_notification_channel( - self, - name, - code, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Verifies a ``NotificationChannel`` by proving receipt of the code - delivered to the channel as a result of calling - ``SendNotificationChannelVerificationCode``. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.NotificationChannelServiceClient() - >>> - >>> name = client.notification_channel_path('[PROJECT]', '[NOTIFICATION_CHANNEL]') - >>> - >>> # TODO: Initialize `code`: - >>> code = '' - >>> - >>> response = client.verify_notification_channel(name, code) - - Args: - name (str): Required. The notification channel to verify. - code (str): Required. 
The verification code that was delivered to the channel as - a result of invoking the ``SendNotificationChannelVerificationCode`` API - method or that was retrieved from a verified channel via - ``GetNotificationChannelVerificationCode``. For example, one might have - "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in general, one is only - guaranteed that the code is valid UTF-8; one should not make any - assumptions regarding the structure or format of the code). - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.NotificationChannel` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "verify_notification_channel" not in self._inner_api_calls: - self._inner_api_calls[ - "verify_notification_channel" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.verify_notification_channel, - default_retry=self._method_configs["VerifyNotificationChannel"].retry, - default_timeout=self._method_configs[ - "VerifyNotificationChannel" - ].timeout, - client_info=self._client_info, - ) - - request = notification_service_pb2.VerifyNotificationChannelRequest( - name=name, code=code - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["verify_notification_channel"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/monitoring_v3/gapic/notification_channel_service_client_config.py b/google/cloud/monitoring_v3/gapic/notification_channel_service_client_config.py deleted file mode 100644 index 6d0e1e1d..00000000 --- a/google/cloud/monitoring_v3/gapic/notification_channel_service_client_config.py +++ /dev/null @@ -1,73 +0,0 @@ -config = { - "interfaces": { - "google.monitoring.v3.NotificationChannelService": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } - }, - "methods": { - "ListNotificationChannelDescriptors": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetNotificationChannelDescriptor": { - "timeout_millis": 60000, - "retry_codes_name": 
"idempotent", - "retry_params_name": "default", - }, - "ListNotificationChannels": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetNotificationChannel": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "CreateNotificationChannel": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "UpdateNotificationChannel": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "DeleteNotificationChannel": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "SendNotificationChannelVerificationCode": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GetNotificationChannelVerificationCode": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "VerifyNotificationChannel": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git a/google/cloud/monitoring_v3/gapic/service_monitoring_service_client.py b/google/cloud/monitoring_v3/gapic/service_monitoring_service_client.py deleted file mode 100644 index f015a402..00000000 --- a/google/cloud/monitoring_v3/gapic/service_monitoring_service_client.py +++ /dev/null @@ -1,1158 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.monitoring.v3 ServiceMonitoringService API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.page_iterator -import google.api_core.path_template -import grpc - -from google.api import metric_pb2 as api_metric_pb2 -from google.api import monitored_resource_pb2 -from google.cloud.monitoring_v3.gapic import enums -from google.cloud.monitoring_v3.gapic import service_monitoring_service_client_config -from google.cloud.monitoring_v3.gapic.transports import ( - service_monitoring_service_grpc_transport, -) -from google.cloud.monitoring_v3.proto import alert_pb2 -from google.cloud.monitoring_v3.proto import alert_service_pb2 -from google.cloud.monitoring_v3.proto import alert_service_pb2_grpc -from google.cloud.monitoring_v3.proto import common_pb2 -from google.cloud.monitoring_v3.proto import group_pb2 -from google.cloud.monitoring_v3.proto import group_service_pb2 -from google.cloud.monitoring_v3.proto import group_service_pb2_grpc -from google.cloud.monitoring_v3.proto import metric_pb2 as proto_metric_pb2 -from google.cloud.monitoring_v3.proto import metric_service_pb2 -from google.cloud.monitoring_v3.proto import metric_service_pb2_grpc -from google.cloud.monitoring_v3.proto import notification_pb2 -from 
google.cloud.monitoring_v3.proto import notification_service_pb2 -from google.cloud.monitoring_v3.proto import notification_service_pb2_grpc -from google.cloud.monitoring_v3.proto import service_pb2 -from google.cloud.monitoring_v3.proto import service_service_pb2 -from google.cloud.monitoring_v3.proto import service_service_pb2_grpc -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -from google.protobuf import timestamp_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-monitoring" -).version - - -class ServiceMonitoringServiceClient(object): - """ - The Cloud Monitoring Service-Oriented Monitoring API has endpoints - for managing and querying aspects of a workspace's services. These - include the ``Service``'s monitored resources, its Service-Level - Objectives, and a taxonomy of categorized Health Metrics. - """ - - SERVICE_ADDRESS = "monitoring.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.monitoring.v3.ServiceMonitoringService" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ServiceMonitoringServiceClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def project_path(cls, project): - """Return a fully-qualified project string.""" - return google.api_core.path_template.expand( - "projects/{project}", project=project - ) - - @classmethod - def service_path(cls, project, service): - """Return a fully-qualified service string.""" - return google.api_core.path_template.expand( - "projects/{project}/services/{service}", project=project, service=service - ) - - @classmethod - def service_level_objective_path(cls, project, service, service_level_objective): - """Return a fully-qualified service_level_objective string.""" - return google.api_core.path_template.expand( - "projects/{project}/services/{service}/serviceLevelObjectives/{service_level_objective}", - project=project, - service=service, - service_level_objective=service_level_objective, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.ServiceMonitoringServiceGrpcTransport, - Callable[[~.Credentials, type], ~.ServiceMonitoringServiceGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. 
These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = service_monitoring_service_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. 
- if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=service_monitoring_service_grpc_transport.ServiceMonitoringServiceGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = service_monitoring_service_grpc_transport.ServiceMonitoringServiceGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_service( - self, - parent, - service, - service_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Create a ``Service``. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.ServiceMonitoringServiceClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> # TODO: Initialize `service`: - >>> service = {} - >>> - >>> response = client.create_service(parent, service) - - Args: - parent (str): Required. Resource name of the parent workspace. 
The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER] - service (Union[dict, ~google.cloud.monitoring_v3.types.Service]): Required. The ``Service`` to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.Service` - service_id (str): Optional. The Service id to use for this Service. If omitted, an id - will be generated instead. Must match the pattern ``[a-z0-9\-]+`` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.Service` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "create_service" not in self._inner_api_calls: - self._inner_api_calls[ - "create_service" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_service, - default_retry=self._method_configs["CreateService"].retry, - default_timeout=self._method_configs["CreateService"].timeout, - client_info=self._client_info, - ) - - request = service_service_pb2.CreateServiceRequest( - parent=parent, service=service, service_id=service_id - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_service"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_service( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Get the named ``Service``. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.ServiceMonitoringServiceClient() - >>> - >>> name = client.service_path('[PROJECT]', '[SERVICE]') - >>> - >>> response = client.get_service(name) - - Args: - name (str): Required. Resource name of the ``Service``. The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.Service` instance. 
- - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "get_service" not in self._inner_api_calls: - self._inner_api_calls[ - "get_service" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_service, - default_retry=self._method_configs["GetService"].retry, - default_timeout=self._method_configs["GetService"].timeout, - client_info=self._client_info, - ) - - request = service_service_pb2.GetServiceRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_service"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_services( - self, - parent, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - List ``Service``\ s for this workspace. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.ServiceMonitoringServiceClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> # Iterate over all results - >>> for element in client.list_services(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_services(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. 
Resource name of the parent containing the listed - services, either a project or a Monitoring Workspace. The formats are: - - :: - - projects/[PROJECT_ID_OR_NUMBER] - workspaces/[HOST_PROJECT_ID_OR_NUMBER] - filter_ (str): A filter specifying what ``Service``\ s to return. The filter - currently supports the following fields: - - :: - - - `identifier_case` - - `app_engine.module_id` - - `cloud_endpoints.service` - - `cluster_istio.location` - - `cluster_istio.cluster_name` - - `cluster_istio.service_namespace` - - `cluster_istio.service_name` - - ``identifier_case`` refers to which option in the identifier oneof is - populated. For example, the filter ``identifier_case = "CUSTOM"`` would - match all services with a value for the ``custom`` field. Valid options - are "CUSTOM", "APP_ENGINE", "CLOUD_ENDPOINTS", and "CLUSTER_ISTIO". - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.monitoring_v3.types.Service` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "list_services" not in self._inner_api_calls: - self._inner_api_calls[ - "list_services" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_services, - default_retry=self._method_configs["ListServices"].retry, - default_timeout=self._method_configs["ListServices"].timeout, - client_info=self._client_info, - ) - - request = service_service_pb2.ListServicesRequest( - parent=parent, filter=filter_, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_services"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="services", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def update_service( - self, - service, - update_mask=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Update this ``Service``. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.ServiceMonitoringServiceClient() - >>> - >>> # TODO: Initialize `service`: - >>> service = {} - >>> - >>> response = client.update_service(service) - - Args: - service (Union[dict, ~google.cloud.monitoring_v3.types.Service]): Required. The ``Service`` to draw updates from. 
The given ``name`` - specifies the resource to update. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.Service` - update_mask (Union[dict, ~google.cloud.monitoring_v3.types.FieldMask]): A set of field paths defining which fields to use for the update. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.Service` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "update_service" not in self._inner_api_calls: - self._inner_api_calls[ - "update_service" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_service, - default_retry=self._method_configs["UpdateService"].retry, - default_timeout=self._method_configs["UpdateService"].timeout, - client_info=self._client_info, - ) - - request = service_service_pb2.UpdateServiceRequest( - service=service, update_mask=update_mask - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("service.name", service.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_service"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_service( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Soft delete this ``Service``. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.ServiceMonitoringServiceClient() - >>> - >>> name = client.service_path('[PROJECT]', '[SERVICE]') - >>> - >>> client.delete_service(name) - - Args: - name (str): Required. Resource name of the ``Service`` to delete. The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. 
- - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "delete_service" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_service" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_service, - default_retry=self._method_configs["DeleteService"].retry, - default_timeout=self._method_configs["DeleteService"].timeout, - client_info=self._client_info, - ) - - request = service_service_pb2.DeleteServiceRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_service"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_service_level_objective( - self, - parent, - service_level_objective, - service_level_objective_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Create a ``ServiceLevelObjective`` for the given ``Service``. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.ServiceMonitoringServiceClient() - >>> - >>> parent = client.service_path('[PROJECT]', '[SERVICE]') - >>> - >>> # TODO: Initialize `service_level_objective`: - >>> service_level_objective = {} - >>> - >>> response = client.create_service_level_objective(parent, service_level_objective) - - Args: - parent (str): Required. Resource name of the parent ``Service``. 
The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] - service_level_objective (Union[dict, ~google.cloud.monitoring_v3.types.ServiceLevelObjective]): Required. The ``ServiceLevelObjective`` to create. The provided - ``name`` will be respected if no ``ServiceLevelObjective`` exists with - this name. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.ServiceLevelObjective` - service_level_objective_id (str): Optional. The ServiceLevelObjective id to use for this - ServiceLevelObjective. If omitted, an id will be generated instead. Must - match the pattern ``[a-z0-9\-]+`` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.ServiceLevelObjective` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "create_service_level_objective" not in self._inner_api_calls: - self._inner_api_calls[ - "create_service_level_objective" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_service_level_objective, - default_retry=self._method_configs["CreateServiceLevelObjective"].retry, - default_timeout=self._method_configs[ - "CreateServiceLevelObjective" - ].timeout, - client_info=self._client_info, - ) - - request = service_service_pb2.CreateServiceLevelObjectiveRequest( - parent=parent, - service_level_objective=service_level_objective, - service_level_objective_id=service_level_objective_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_service_level_objective"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_service_level_objective( - self, - name, - view=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Get a ``ServiceLevelObjective`` by name. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.ServiceMonitoringServiceClient() - >>> - >>> name = client.service_level_objective_path('[PROJECT]', '[SERVICE]', '[SERVICE_LEVEL_OBJECTIVE]') - >>> - >>> response = client.get_service_level_objective(name) - - Args: - name (str): Required. Resource name of the ``ServiceLevelObjective`` to get. The - format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] - view (~google.cloud.monitoring_v3.types.View): View of the ``ServiceLevelObjective`` to return. If ``DEFAULT``, - return the ``ServiceLevelObjective`` as originally defined. 
If - ``EXPLICIT`` and the ``ServiceLevelObjective`` is defined in terms of a - ``BasicSli``, replace the ``BasicSli`` with a ``RequestBasedSli`` - spelling out how the SLI is computed. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.ServiceLevelObjective` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "get_service_level_objective" not in self._inner_api_calls: - self._inner_api_calls[ - "get_service_level_objective" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_service_level_objective, - default_retry=self._method_configs["GetServiceLevelObjective"].retry, - default_timeout=self._method_configs[ - "GetServiceLevelObjective" - ].timeout, - client_info=self._client_info, - ) - - request = service_service_pb2.GetServiceLevelObjectiveRequest( - name=name, view=view - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_service_level_objective"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_service_level_objectives( - self, - parent, - filter_=None, - page_size=None, - view=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - List the ``ServiceLevelObjective``\ s for the given ``Service``. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.ServiceMonitoringServiceClient() - >>> - >>> parent = client.service_path('[PROJECT]', '[SERVICE]') - >>> - >>> # Iterate over all results - >>> for element in client.list_service_level_objectives(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_service_level_objectives(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. Resource name of the parent containing the listed SLOs, - either a project or a Monitoring Workspace. 
The formats are: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] - workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/- - filter_ (str): A filter specifying what ``ServiceLevelObjective``\ s to return. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - view (~google.cloud.monitoring_v3.types.View): View of the ``ServiceLevelObjective``\ s to return. If ``DEFAULT``, - return each ``ServiceLevelObjective`` as originally defined. If - ``EXPLICIT`` and the ``ServiceLevelObjective`` is defined in terms of a - ``BasicSli``, replace the ``BasicSli`` with a ``RequestBasedSli`` - spelling out how the SLI is computed. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.monitoring_v3.types.ServiceLevelObjective` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "list_service_level_objectives" not in self._inner_api_calls: - self._inner_api_calls[ - "list_service_level_objectives" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_service_level_objectives, - default_retry=self._method_configs["ListServiceLevelObjectives"].retry, - default_timeout=self._method_configs[ - "ListServiceLevelObjectives" - ].timeout, - client_info=self._client_info, - ) - - request = service_service_pb2.ListServiceLevelObjectivesRequest( - parent=parent, filter=filter_, page_size=page_size, view=view - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_service_level_objectives"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="service_level_objectives", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def update_service_level_objective( - self, - service_level_objective, - update_mask=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Update the given ``ServiceLevelObjective``. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.ServiceMonitoringServiceClient() - >>> - >>> # TODO: Initialize `service_level_objective`: - >>> service_level_objective = {} - >>> - >>> response = client.update_service_level_objective(service_level_objective) - - Args: - service_level_objective (Union[dict, ~google.cloud.monitoring_v3.types.ServiceLevelObjective]): Required. The ``ServiceLevelObjective`` to draw updates from. 
The - given ``name`` specifies the resource to update. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.ServiceLevelObjective` - update_mask (Union[dict, ~google.cloud.monitoring_v3.types.FieldMask]): A set of field paths defining which fields to use for the update. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.ServiceLevelObjective` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "update_service_level_objective" not in self._inner_api_calls: - self._inner_api_calls[ - "update_service_level_objective" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_service_level_objective, - default_retry=self._method_configs["UpdateServiceLevelObjective"].retry, - default_timeout=self._method_configs[ - "UpdateServiceLevelObjective" - ].timeout, - client_info=self._client_info, - ) - - request = service_service_pb2.UpdateServiceLevelObjectiveRequest( - service_level_objective=service_level_objective, update_mask=update_mask - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [ - ("service_level_objective.name", service_level_objective.name) - ] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_service_level_objective"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_service_level_objective( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Delete the given ``ServiceLevelObjective``. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.ServiceMonitoringServiceClient() - >>> - >>> name = client.service_level_objective_path('[PROJECT]', '[SERVICE]', '[SERVICE_LEVEL_OBJECTIVE]') - >>> - >>> client.delete_service_level_objective(name) - - Args: - name (str): Required. Resource name of the ``ServiceLevelObjective`` to delete. - The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "delete_service_level_objective" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_service_level_objective" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_service_level_objective, - default_retry=self._method_configs["DeleteServiceLevelObjective"].retry, - default_timeout=self._method_configs[ - "DeleteServiceLevelObjective" - ].timeout, - client_info=self._client_info, - ) - - request = service_service_pb2.DeleteServiceLevelObjectiveRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_service_level_objective"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/monitoring_v3/gapic/service_monitoring_service_client_config.py b/google/cloud/monitoring_v3/gapic/service_monitoring_service_client_config.py deleted file mode 100644 index 575c0f88..00000000 --- a/google/cloud/monitoring_v3/gapic/service_monitoring_service_client_config.py +++ /dev/null @@ -1,73 +0,0 @@ -config = { - "interfaces": { - 
"google.monitoring.v3.ServiceMonitoringService": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } - }, - "methods": { - "CreateService": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GetService": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListServices": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "UpdateService": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "DeleteService": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "CreateServiceLevelObjective": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GetServiceLevelObjective": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListServiceLevelObjectives": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "UpdateServiceLevelObjective": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "DeleteServiceLevelObjective": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git a/google/cloud/monitoring_v3/gapic/transports/__init__.py b/google/cloud/monitoring_v3/gapic/transports/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git 
a/google/cloud/monitoring_v3/gapic/transports/alert_policy_service_grpc_transport.py b/google/cloud/monitoring_v3/gapic/transports/alert_policy_service_grpc_transport.py deleted file mode 100644 index e911a849..00000000 --- a/google/cloud/monitoring_v3/gapic/transports/alert_policy_service_grpc_transport.py +++ /dev/null @@ -1,183 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers - -from google.cloud.monitoring_v3.proto import alert_service_pb2_grpc - - -class AlertPolicyServiceGrpcTransport(object): - """gRPC transport class providing stubs for - google.monitoring.v3 AlertPolicyService API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write", - ) - - def __init__( - self, channel=None, credentials=None, address="monitoring.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. 
- credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "alert_policy_service_stub": alert_service_pb2_grpc.AlertPolicyServiceStub( - channel - ) - } - - @classmethod - def create_channel( - cls, address="monitoring.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. 
- """ - return self._channel - - @property - def list_alert_policies(self): - """Return the gRPC stub for :meth:`AlertPolicyServiceClient.list_alert_policies`. - - Lists the existing alerting policies for the project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["alert_policy_service_stub"].ListAlertPolicies - - @property - def get_alert_policy(self): - """Return the gRPC stub for :meth:`AlertPolicyServiceClient.get_alert_policy`. - - Gets a single alerting policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["alert_policy_service_stub"].GetAlertPolicy - - @property - def create_alert_policy(self): - """Return the gRPC stub for :meth:`AlertPolicyServiceClient.create_alert_policy`. - - Creates a new alerting policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["alert_policy_service_stub"].CreateAlertPolicy - - @property - def delete_alert_policy(self): - """Return the gRPC stub for :meth:`AlertPolicyServiceClient.delete_alert_policy`. - - Deletes an alerting policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["alert_policy_service_stub"].DeleteAlertPolicy - - @property - def update_alert_policy(self): - """Return the gRPC stub for :meth:`AlertPolicyServiceClient.update_alert_policy`. - - Updates an alerting policy. You can either replace the entire policy - with a new one or replace only certain fields in the current alerting - policy by specifying the fields to be updated via ``updateMask``. - Returns the updated alerting policy. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["alert_policy_service_stub"].UpdateAlertPolicy diff --git a/google/cloud/monitoring_v3/gapic/transports/group_service_grpc_transport.py b/google/cloud/monitoring_v3/gapic/transports/group_service_grpc_transport.py deleted file mode 100644 index 83a75e1a..00000000 --- a/google/cloud/monitoring_v3/gapic/transports/group_service_grpc_transport.py +++ /dev/null @@ -1,192 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers - -from google.cloud.monitoring_v3.proto import group_service_pb2_grpc - - -class GroupServiceGrpcTransport(object): - """gRPC transport class providing stubs for - google.monitoring.v3 GroupService API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. 
- _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write", - ) - - def __init__( - self, channel=None, credentials=None, address="monitoring.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "group_service_stub": group_service_pb2_grpc.GroupServiceStub(channel) - } - - @classmethod - def create_channel( - cls, address="monitoring.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. 
These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def list_groups(self): - """Return the gRPC stub for :meth:`GroupServiceClient.list_groups`. - - Lists the existing groups. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["group_service_stub"].ListGroups - - @property - def get_group(self): - """Return the gRPC stub for :meth:`GroupServiceClient.get_group`. - - Gets a single group. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["group_service_stub"].GetGroup - - @property - def create_group(self): - """Return the gRPC stub for :meth:`GroupServiceClient.create_group`. - - Creates a new group. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["group_service_stub"].CreateGroup - - @property - def update_group(self): - """Return the gRPC stub for :meth:`GroupServiceClient.update_group`. - - Updates an existing group. You can change any group attributes - except ``name``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["group_service_stub"].UpdateGroup - - @property - def delete_group(self): - """Return the gRPC stub for :meth:`GroupServiceClient.delete_group`. - - Deletes an existing group. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["group_service_stub"].DeleteGroup - - @property - def list_group_members(self): - """Return the gRPC stub for :meth:`GroupServiceClient.list_group_members`. - - Lists the monitored resources that are members of a group. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["group_service_stub"].ListGroupMembers diff --git a/google/cloud/monitoring_v3/gapic/transports/metric_service_grpc_transport.py b/google/cloud/monitoring_v3/gapic/transports/metric_service_grpc_transport.py deleted file mode 100644 index 33ddb1d5..00000000 --- a/google/cloud/monitoring_v3/gapic/transports/metric_service_grpc_transport.py +++ /dev/null @@ -1,224 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers - -from google.cloud.monitoring_v3.proto import metric_service_pb2_grpc - - -class MetricServiceGrpcTransport(object): - """gRPC transport class providing stubs for - google.monitoring.v3 MetricService API. 
- - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write", - ) - - def __init__( - self, channel=None, credentials=None, address="monitoring.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. 
- self._stubs = { - "metric_service_stub": metric_service_pb2_grpc.MetricServiceStub(channel) - } - - @classmethod - def create_channel( - cls, address="monitoring.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def list_monitored_resource_descriptors(self): - """Return the gRPC stub for :meth:`MetricServiceClient.list_monitored_resource_descriptors`. - - Lists monitored resource descriptors that match a filter. This method does not require a Workspace. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["metric_service_stub"].ListMonitoredResourceDescriptors - - @property - def get_monitored_resource_descriptor(self): - """Return the gRPC stub for :meth:`MetricServiceClient.get_monitored_resource_descriptor`. - - Gets a single monitored resource descriptor. This method does not require a Workspace. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["metric_service_stub"].GetMonitoredResourceDescriptor - - @property - def list_metric_descriptors(self): - """Return the gRPC stub for :meth:`MetricServiceClient.list_metric_descriptors`. - - Lists metric descriptors that match a filter. This method does not require a Workspace. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["metric_service_stub"].ListMetricDescriptors - - @property - def get_metric_descriptor(self): - """Return the gRPC stub for :meth:`MetricServiceClient.get_metric_descriptor`. - - Gets a single metric descriptor. This method does not require a Workspace. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["metric_service_stub"].GetMetricDescriptor - - @property - def create_metric_descriptor(self): - """Return the gRPC stub for :meth:`MetricServiceClient.create_metric_descriptor`. - - Creates a new metric descriptor. User-created metric descriptors - define `custom - metrics `__. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["metric_service_stub"].CreateMetricDescriptor - - @property - def delete_metric_descriptor(self): - """Return the gRPC stub for :meth:`MetricServiceClient.delete_metric_descriptor`. - - Deletes a metric descriptor. Only user-created `custom - metrics `__ can be - deleted. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["metric_service_stub"].DeleteMetricDescriptor - - @property - def list_time_series(self): - """Return the gRPC stub for :meth:`MetricServiceClient.list_time_series`. - - Lists time series that match a filter. 
This method does not require a Workspace. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["metric_service_stub"].ListTimeSeries - - @property - def create_time_series(self): - """Return the gRPC stub for :meth:`MetricServiceClient.create_time_series`. - - Creates or adds data to one or more time series. - The response is empty if all time series in the request were written. - If any time series could not be written, a corresponding failure message is - included in the error response. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["metric_service_stub"].CreateTimeSeries diff --git a/google/cloud/monitoring_v3/gapic/transports/notification_channel_service_grpc_transport.py b/google/cloud/monitoring_v3/gapic/transports/notification_channel_service_grpc_transport.py deleted file mode 100644 index bdc98192..00000000 --- a/google/cloud/monitoring_v3/gapic/transports/notification_channel_service_grpc_transport.py +++ /dev/null @@ -1,293 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import google.api_core.grpc_helpers - -from google.cloud.monitoring_v3.proto import notification_service_pb2_grpc - - -class NotificationChannelServiceGrpcTransport(object): - """gRPC transport class providing stubs for - google.monitoring.v3 NotificationChannelService API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write", - ) - - def __init__( - self, channel=None, credentials=None, address="monitoring.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." - ) - - # Create the channel. 
- if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "notification_channel_service_stub": notification_service_pb2_grpc.NotificationChannelServiceStub( - channel - ) - } - - @classmethod - def create_channel( - cls, address="monitoring.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def list_notification_channel_descriptors(self): - """Return the gRPC stub for :meth:`NotificationChannelServiceClient.list_notification_channel_descriptors`. - - Lists the descriptors for supported channel types. The use of descriptors - makes it possible for new channel types to be dynamically added. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs[ - "notification_channel_service_stub" - ].ListNotificationChannelDescriptors - - @property - def get_notification_channel_descriptor(self): - """Return the gRPC stub for :meth:`NotificationChannelServiceClient.get_notification_channel_descriptor`. - - Gets a single channel descriptor. The descriptor indicates which fields - are expected / permitted for a notification channel of the given type. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs[ - "notification_channel_service_stub" - ].GetNotificationChannelDescriptor - - @property - def list_notification_channels(self): - """Return the gRPC stub for :meth:`NotificationChannelServiceClient.list_notification_channels`. - - Lists the notification channels that have been created for the project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["notification_channel_service_stub"].ListNotificationChannels - - @property - def get_notification_channel(self): - """Return the gRPC stub for :meth:`NotificationChannelServiceClient.get_notification_channel`. - - Gets a single notification channel. The channel includes the relevant - configuration details with which the channel was created. However, the - response may truncate or omit passwords, API keys, or other private key - matter and thus the response may not be 100% identical to the information - that was supplied in the call to the create method. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["notification_channel_service_stub"].GetNotificationChannel - - @property - def create_notification_channel(self): - """Return the gRPC stub for :meth:`NotificationChannelServiceClient.create_notification_channel`. - - Creates a new notification channel, representing a single notification - endpoint such as an email address, SMS number, or PagerDuty service. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs[ - "notification_channel_service_stub" - ].CreateNotificationChannel - - @property - def update_notification_channel(self): - """Return the gRPC stub for :meth:`NotificationChannelServiceClient.update_notification_channel`. - - Updates a notification channel. Fields not specified in the field mask - remain unchanged. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs[ - "notification_channel_service_stub" - ].UpdateNotificationChannel - - @property - def delete_notification_channel(self): - """Return the gRPC stub for :meth:`NotificationChannelServiceClient.delete_notification_channel`. - - Deletes a notification channel. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs[ - "notification_channel_service_stub" - ].DeleteNotificationChannel - - @property - def send_notification_channel_verification_code(self): - """Return the gRPC stub for :meth:`NotificationChannelServiceClient.send_notification_channel_verification_code`. - - Causes a verification code to be delivered to the channel. The code - can then be supplied in ``VerifyNotificationChannel`` to verify the - channel. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs[ - "notification_channel_service_stub" - ].SendNotificationChannelVerificationCode - - @property - def get_notification_channel_verification_code(self): - """Return the gRPC stub for :meth:`NotificationChannelServiceClient.get_notification_channel_verification_code`. - - Requests a verification code for an already verified channel that can then - be used in a call to VerifyNotificationChannel() on a different channel - with an equivalent identity in the same or in a different project. This - makes it possible to copy a channel between projects without requiring - manual reverification of the channel. If the channel is not in the - verified state, this method will fail (in other words, this may only be - used if the SendNotificationChannelVerificationCode and - VerifyNotificationChannel paths have already been used to put the given - channel into the verified state). - - There is no guarantee that the verification codes returned by this method - will be of a similar structure or form as the ones that are delivered - to the channel via SendNotificationChannelVerificationCode; while - VerifyNotificationChannel() will recognize both the codes delivered via - SendNotificationChannelVerificationCode() and returned from - GetNotificationChannelVerificationCode(), it is typically the case that - the verification codes delivered via - SendNotificationChannelVerificationCode() will be shorter and also - have a shorter expiration (e.g. codes such as "G-123456") whereas - GetVerificationCode() will typically return a much longer, websafe base - 64 encoded string that has a longer expiration time. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs[ - "notification_channel_service_stub" - ].GetNotificationChannelVerificationCode - - @property - def verify_notification_channel(self): - """Return the gRPC stub for :meth:`NotificationChannelServiceClient.verify_notification_channel`. - - Verifies a ``NotificationChannel`` by proving receipt of the code - delivered to the channel as a result of calling - ``SendNotificationChannelVerificationCode``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs[ - "notification_channel_service_stub" - ].VerifyNotificationChannel diff --git a/google/cloud/monitoring_v3/gapic/transports/service_monitoring_service_grpc_transport.py b/google/cloud/monitoring_v3/gapic/transports/service_monitoring_service_grpc_transport.py deleted file mode 100644 index f28253b1..00000000 --- a/google/cloud/monitoring_v3/gapic/transports/service_monitoring_service_grpc_transport.py +++ /dev/null @@ -1,251 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers - -from google.cloud.monitoring_v3.proto import service_service_pb2_grpc - - -class ServiceMonitoringServiceGrpcTransport(object): - """gRPC transport class providing stubs for - google.monitoring.v3 ServiceMonitoringService API. 
- - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write", - ) - - def __init__( - self, channel=None, credentials=None, address="monitoring.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. 
- self._stubs = { - "service_monitoring_service_stub": service_service_pb2_grpc.ServiceMonitoringServiceStub( - channel - ) - } - - @classmethod - def create_channel( - cls, address="monitoring.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_service(self): - """Return the gRPC stub for :meth:`ServiceMonitoringServiceClient.create_service`. - - Create a ``Service``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["service_monitoring_service_stub"].CreateService - - @property - def get_service(self): - """Return the gRPC stub for :meth:`ServiceMonitoringServiceClient.get_service`. - - Get the named ``Service``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["service_monitoring_service_stub"].GetService - - @property - def list_services(self): - """Return the gRPC stub for :meth:`ServiceMonitoringServiceClient.list_services`. - - List ``Service``\ s for this workspace. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["service_monitoring_service_stub"].ListServices - - @property - def update_service(self): - """Return the gRPC stub for :meth:`ServiceMonitoringServiceClient.update_service`. - - Update this ``Service``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["service_monitoring_service_stub"].UpdateService - - @property - def delete_service(self): - """Return the gRPC stub for :meth:`ServiceMonitoringServiceClient.delete_service`. - - Soft delete this ``Service``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["service_monitoring_service_stub"].DeleteService - - @property - def create_service_level_objective(self): - """Return the gRPC stub for :meth:`ServiceMonitoringServiceClient.create_service_level_objective`. - - Create a ``ServiceLevelObjective`` for the given ``Service``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs[ - "service_monitoring_service_stub" - ].CreateServiceLevelObjective - - @property - def get_service_level_objective(self): - """Return the gRPC stub for :meth:`ServiceMonitoringServiceClient.get_service_level_objective`. - - Get a ``ServiceLevelObjective`` by name. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["service_monitoring_service_stub"].GetServiceLevelObjective - - @property - def list_service_level_objectives(self): - """Return the gRPC stub for :meth:`ServiceMonitoringServiceClient.list_service_level_objectives`. - - List the ``ServiceLevelObjective``\ s for the given ``Service``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["service_monitoring_service_stub"].ListServiceLevelObjectives - - @property - def update_service_level_objective(self): - """Return the gRPC stub for :meth:`ServiceMonitoringServiceClient.update_service_level_objective`. - - Update the given ``ServiceLevelObjective``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs[ - "service_monitoring_service_stub" - ].UpdateServiceLevelObjective - - @property - def delete_service_level_objective(self): - """Return the gRPC stub for :meth:`ServiceMonitoringServiceClient.delete_service_level_objective`. - - Delete the given ``ServiceLevelObjective``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs[ - "service_monitoring_service_stub" - ].DeleteServiceLevelObjective diff --git a/google/cloud/monitoring_v3/gapic/transports/uptime_check_service_grpc_transport.py b/google/cloud/monitoring_v3/gapic/transports/uptime_check_service_grpc_transport.py deleted file mode 100644 index b2272fdf..00000000 --- a/google/cloud/monitoring_v3/gapic/transports/uptime_check_service_grpc_transport.py +++ /dev/null @@ -1,199 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers - -from google.cloud.monitoring_v3.proto import uptime_service_pb2_grpc - - -class UptimeCheckServiceGrpcTransport(object): - """gRPC transport class providing stubs for - google.monitoring.v3 UptimeCheckService API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write", - ) - - def __init__( - self, channel=None, credentials=None, address="monitoring.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). 
- if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "uptime_check_service_stub": uptime_service_pb2_grpc.UptimeCheckServiceStub( - channel - ) - } - - @classmethod - def create_channel( - cls, address="monitoring.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def list_uptime_check_configs(self): - """Return the gRPC stub for :meth:`UptimeCheckServiceClient.list_uptime_check_configs`. - - Lists the existing valid Uptime check configurations for the project - (leaving out any invalid configurations). - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["uptime_check_service_stub"].ListUptimeCheckConfigs - - @property - def get_uptime_check_config(self): - """Return the gRPC stub for :meth:`UptimeCheckServiceClient.get_uptime_check_config`. - - Gets a single Uptime check configuration. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["uptime_check_service_stub"].GetUptimeCheckConfig - - @property - def create_uptime_check_config(self): - """Return the gRPC stub for :meth:`UptimeCheckServiceClient.create_uptime_check_config`. - - Creates a new Uptime check configuration. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["uptime_check_service_stub"].CreateUptimeCheckConfig - - @property - def update_uptime_check_config(self): - """Return the gRPC stub for :meth:`UptimeCheckServiceClient.update_uptime_check_config`. - - Updates an Uptime check configuration. You can either replace the - entire configuration with a new one or replace only certain fields in - the current configuration by specifying the fields to be updated via - ``updateMask``. Returns the updated configuration. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["uptime_check_service_stub"].UpdateUptimeCheckConfig - - @property - def delete_uptime_check_config(self): - """Return the gRPC stub for :meth:`UptimeCheckServiceClient.delete_uptime_check_config`. - - Deletes an Uptime check configuration. Note that this method will fail - if the Uptime check configuration is referenced by an alert policy or - other dependent configs that would be rendered invalid by the deletion. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["uptime_check_service_stub"].DeleteUptimeCheckConfig - - @property - def list_uptime_check_ips(self): - """Return the gRPC stub for :meth:`UptimeCheckServiceClient.list_uptime_check_ips`. - - Returns the list of IP addresses that checkers run from - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["uptime_check_service_stub"].ListUptimeCheckIps diff --git a/google/cloud/monitoring_v3/gapic/uptime_check_service_client.py b/google/cloud/monitoring_v3/gapic/uptime_check_service_client.py deleted file mode 100644 index 5a506293..00000000 --- a/google/cloud/monitoring_v3/gapic/uptime_check_service_client.py +++ /dev/null @@ -1,772 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.monitoring.v3 UptimeCheckService API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.page_iterator -import google.api_core.path_template -import grpc - -from google.api import metric_pb2 as api_metric_pb2 -from google.api import monitored_resource_pb2 -from google.cloud.monitoring_v3.gapic import enums -from google.cloud.monitoring_v3.gapic import uptime_check_service_client_config -from google.cloud.monitoring_v3.gapic.transports import ( - uptime_check_service_grpc_transport, -) -from google.cloud.monitoring_v3.proto import alert_pb2 -from google.cloud.monitoring_v3.proto import alert_service_pb2 -from google.cloud.monitoring_v3.proto import alert_service_pb2_grpc -from google.cloud.monitoring_v3.proto import common_pb2 -from google.cloud.monitoring_v3.proto import group_pb2 -from google.cloud.monitoring_v3.proto import group_service_pb2 -from google.cloud.monitoring_v3.proto import group_service_pb2_grpc -from google.cloud.monitoring_v3.proto import metric_pb2 as proto_metric_pb2 -from google.cloud.monitoring_v3.proto import metric_service_pb2 -from google.cloud.monitoring_v3.proto import metric_service_pb2_grpc -from google.cloud.monitoring_v3.proto import notification_pb2 -from google.cloud.monitoring_v3.proto import notification_service_pb2 -from google.cloud.monitoring_v3.proto import notification_service_pb2_grpc -from google.cloud.monitoring_v3.proto import service_pb2 -from google.cloud.monitoring_v3.proto import service_service_pb2 -from google.cloud.monitoring_v3.proto import service_service_pb2_grpc -from google.cloud.monitoring_v3.proto import uptime_pb2 -from google.cloud.monitoring_v3.proto import 
uptime_service_pb2 -from google.cloud.monitoring_v3.proto import uptime_service_pb2_grpc -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -from google.protobuf import timestamp_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-monitoring" -).version - - -class UptimeCheckServiceClient(object): - """ - The UptimeCheckService API is used to manage (list, create, delete, - edit) Uptime check configurations in the Stackdriver Monitoring product. - An Uptime check is a piece of configuration that determines which - resources and services to monitor for availability. These configurations - can also be configured interactively by navigating to the [Cloud - Console] (http://console.cloud.google.com), selecting the appropriate - project, clicking on "Monitoring" on the left-hand side to navigate to - Stackdriver, and then clicking on "Uptime". - """ - - SERVICE_ADDRESS = "monitoring.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.monitoring.v3.UptimeCheckService" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - UptimeCheckServiceClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def project_path(cls, project): - """Return a fully-qualified project string.""" - return google.api_core.path_template.expand( - "projects/{project}", project=project - ) - - @classmethod - def uptime_check_config_path(cls, project, uptime_check_config): - """Return a fully-qualified uptime_check_config string.""" - return google.api_core.path_template.expand( - "projects/{project}/uptimeCheckConfigs/{uptime_check_config}", - project=project, - uptime_check_config=uptime_check_config, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.UptimeCheckServiceGrpcTransport, - Callable[[~.Credentials, type], ~.UptimeCheckServiceGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. 
A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = uptime_check_service_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=uptime_check_service_grpc_transport.UptimeCheckServiceGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." 
- ) - self.transport = transport - else: - self.transport = uptime_check_service_grpc_transport.UptimeCheckServiceGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def list_uptime_check_configs( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists the existing valid Uptime check configurations for the project - (leaving out any invalid configurations). - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.UptimeCheckServiceClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> # Iterate over all results - >>> for element in client.list_uptime_check_configs(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_uptime_check_configs(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The project whose Uptime check configurations are listed. 
- The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER] - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "list_uptime_check_configs" not in self._inner_api_calls: - self._inner_api_calls[ - "list_uptime_check_configs" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_uptime_check_configs, - default_retry=self._method_configs["ListUptimeCheckConfigs"].retry, - default_timeout=self._method_configs["ListUptimeCheckConfigs"].timeout, - client_info=self._client_info, - ) - - request = uptime_service_pb2.ListUptimeCheckConfigsRequest( - parent=parent, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_uptime_check_configs"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="uptime_check_configs", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_uptime_check_config( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a single Uptime check configuration. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.UptimeCheckServiceClient() - >>> - >>> name = client.uptime_check_config_path('[PROJECT]', '[UPTIME_CHECK_CONFIG]') - >>> - >>> response = client.get_uptime_check_config(name) - - Args: - name (str): Required. The Uptime check configuration to retrieve. The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "get_uptime_check_config" not in self._inner_api_calls: - self._inner_api_calls[ - "get_uptime_check_config" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_uptime_check_config, - default_retry=self._method_configs["GetUptimeCheckConfig"].retry, - default_timeout=self._method_configs["GetUptimeCheckConfig"].timeout, - client_info=self._client_info, - ) - - request = uptime_service_pb2.GetUptimeCheckConfigRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_uptime_check_config"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_uptime_check_config( - self, - parent, - uptime_check_config, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new Uptime check configuration. 
- - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.UptimeCheckServiceClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> # TODO: Initialize `uptime_check_config`: - >>> uptime_check_config = {} - >>> - >>> response = client.create_uptime_check_config(parent, uptime_check_config) - - Args: - parent (str): Required. The project in which to create the Uptime check. The - format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER] - uptime_check_config (Union[dict, ~google.cloud.monitoring_v3.types.UptimeCheckConfig]): Required. The new Uptime check configuration. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "create_uptime_check_config" not in self._inner_api_calls: - self._inner_api_calls[ - "create_uptime_check_config" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_uptime_check_config, - default_retry=self._method_configs["CreateUptimeCheckConfig"].retry, - default_timeout=self._method_configs["CreateUptimeCheckConfig"].timeout, - client_info=self._client_info, - ) - - request = uptime_service_pb2.CreateUptimeCheckConfigRequest( - parent=parent, uptime_check_config=uptime_check_config - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_uptime_check_config"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_uptime_check_config( - self, - uptime_check_config, - update_mask=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates an Uptime check configuration. You can either replace the - entire configuration with a new one or replace only certain fields in - the current configuration by specifying the fields to be updated via - ``updateMask``. Returns the updated configuration. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.UptimeCheckServiceClient() - >>> - >>> # TODO: Initialize `uptime_check_config`: - >>> uptime_check_config = {} - >>> - >>> response = client.update_uptime_check_config(uptime_check_config) - - Args: - uptime_check_config (Union[dict, ~google.cloud.monitoring_v3.types.UptimeCheckConfig]): Required. If an ``updateMask`` has been specified, this field gives - the values for the set of fields mentioned in the ``updateMask``. 
If an - ``updateMask`` has not been given, this Uptime check configuration - replaces the current configuration. If a field is mentioned in - ``updateMask`` but the corresonding field is omitted in this partial - Uptime check configuration, it has the effect of deleting/clearing the - field from the configuration on the server. - - The following fields can be updated: ``display_name``, ``http_check``, - ``tcp_check``, ``timeout``, ``content_matchers``, and - ``selected_regions``. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig` - update_mask (Union[dict, ~google.cloud.monitoring_v3.types.FieldMask]): Optional. If present, only the listed fields in the current Uptime check - configuration are updated with values from the new configuration. If this - field is empty, then the current configuration is completely replaced with - the new configuration. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.monitoring_v3.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. 
- """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "update_uptime_check_config" not in self._inner_api_calls: - self._inner_api_calls[ - "update_uptime_check_config" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_uptime_check_config, - default_retry=self._method_configs["UpdateUptimeCheckConfig"].retry, - default_timeout=self._method_configs["UpdateUptimeCheckConfig"].timeout, - client_info=self._client_info, - ) - - request = uptime_service_pb2.UpdateUptimeCheckConfigRequest( - uptime_check_config=uptime_check_config, update_mask=update_mask - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("uptime_check_config.name", uptime_check_config.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_uptime_check_config"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_uptime_check_config( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes an Uptime check configuration. Note that this method will fail - if the Uptime check configuration is referenced by an alert policy or - other dependent configs that would be rendered invalid by the deletion. - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.UptimeCheckServiceClient() - >>> - >>> name = client.uptime_check_config_path('[PROJECT]', '[UPTIME_CHECK_CONFIG]') - >>> - >>> client.delete_uptime_check_config(name) - - Args: - name (str): Required. The Uptime check configuration to delete. 
The format is: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. - if "delete_uptime_check_config" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_uptime_check_config" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_uptime_check_config, - default_retry=self._method_configs["DeleteUptimeCheckConfig"].retry, - default_timeout=self._method_configs["DeleteUptimeCheckConfig"].timeout, - client_info=self._client_info, - ) - - request = uptime_service_pb2.DeleteUptimeCheckConfigRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_uptime_check_config"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_uptime_check_ips( - self, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - 
metadata=None, - ): - """ - Returns the list of IP addresses that checkers run from - - Example: - >>> from google.cloud import monitoring_v3 - >>> - >>> client = monitoring_v3.UptimeCheckServiceClient() - >>> - >>> # Iterate over all results - >>> for element in client.list_uptime_check_ips(): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_uptime_check_ips().pages: - ... for element in page: - ... # process element - ... pass - - Args: - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.monitoring_v3.types.UptimeCheckIp` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - if metadata is None: - metadata = [] - metadata = list(metadata) - # Wrap the transport method to add retry and timeout logic. 
- if "list_uptime_check_ips" not in self._inner_api_calls: - self._inner_api_calls[ - "list_uptime_check_ips" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_uptime_check_ips, - default_retry=self._method_configs["ListUptimeCheckIps"].retry, - default_timeout=self._method_configs["ListUptimeCheckIps"].timeout, - client_info=self._client_info, - ) - - request = uptime_service_pb2.ListUptimeCheckIpsRequest(page_size=page_size) - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_uptime_check_ips"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="uptime_check_ips", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator diff --git a/google/cloud/monitoring_v3/gapic/uptime_check_service_client_config.py b/google/cloud/monitoring_v3/gapic/uptime_check_service_client_config.py deleted file mode 100644 index 0be9a06c..00000000 --- a/google/cloud/monitoring_v3/gapic/uptime_check_service_client_config.py +++ /dev/null @@ -1,53 +0,0 @@ -config = { - "interfaces": { - "google.monitoring.v3.UptimeCheckService": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } - }, - "methods": { - "ListUptimeCheckConfigs": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetUptimeCheckConfig": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "CreateUptimeCheckConfig": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": 
"default", - }, - "UpdateUptimeCheckConfig": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "DeleteUptimeCheckConfig": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListUptimeCheckIps": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git a/google/cloud/monitoring_v3/proto/__init__.py b/google/cloud/monitoring_v3/proto/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/google/cloud/monitoring_v3/proto/alert_pb2.py b/google/cloud/monitoring_v3/proto/alert_pb2.py deleted file mode 100644 index d1b5d195..00000000 --- a/google/cloud/monitoring_v3/proto/alert_pb2.py +++ /dev/null @@ -1,1280 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/monitoring_v3/proto/alert.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.monitoring_v3.proto import ( - common_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2, -) -from google.cloud.monitoring_v3.proto import ( - mutation_record_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_mutation__record__pb2, -) -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/monitoring_v3/proto/alert.proto", - package="google.monitoring.v3", - 
syntax="proto3", - serialized_options=b"\n\030com.google.monitoring.v3B\nAlertProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3\352\002\035Google::Cloud::Monitoring::V3", - serialized_pb=b'\n,google/cloud/monitoring_v3/proto/alert.proto\x12\x14google.monitoring.v3\x1a\x19google/api/resource.proto\x1a-google/cloud/monitoring_v3/proto/common.proto\x1a\x36google/cloud/monitoring_v3/proto/mutation_record.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto"\xe9\x10\n\x0b\x41lertPolicy\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x46\n\rdocumentation\x18\r \x01(\x0b\x32/.google.monitoring.v3.AlertPolicy.Documentation\x12\x46\n\x0buser_labels\x18\x10 \x03(\x0b\x32\x31.google.monitoring.v3.AlertPolicy.UserLabelsEntry\x12?\n\nconditions\x18\x0c \x03(\x0b\x32+.google.monitoring.v3.AlertPolicy.Condition\x12I\n\x08\x63ombiner\x18\x06 \x01(\x0e\x32\x37.google.monitoring.v3.AlertPolicy.ConditionCombinerType\x12+\n\x07\x65nabled\x18\x11 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12$\n\x08validity\x18\x12 \x01(\x0b\x32\x12.google.rpc.Status\x12\x1d\n\x15notification_channels\x18\x0e \x03(\t\x12=\n\x0f\x63reation_record\x18\n \x01(\x0b\x32$.google.monitoring.v3.MutationRecord\x12=\n\x0fmutation_record\x18\x0b \x01(\x0b\x32$.google.monitoring.v3.MutationRecord\x1a\x33\n\rDocumentation\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\t\x12\x11\n\tmime_type\x18\x02 \x01(\t\x1a\x92\t\n\tCondition\x12\x0c\n\x04name\x18\x0c \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x06 \x01(\t\x12Z\n\x13\x63ondition_threshold\x18\x01 \x01(\x0b\x32;.google.monitoring.v3.AlertPolicy.Condition.MetricThresholdH\x00\x12U\n\x10\x63ondition_absent\x18\x02 \x01(\x0b\x32\x39.google.monitoring.v3.AlertPolicy.Condition.MetricAbsenceH\x00\x1a\x35\n\x07Trigger\x12\x0f\n\x05\x63ount\x18\x01 
\x01(\x05H\x00\x12\x11\n\x07percent\x18\x02 \x01(\x01H\x00\x42\x06\n\x04type\x1a\x81\x03\n\x0fMetricThreshold\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x37\n\x0c\x61ggregations\x18\x08 \x03(\x0b\x32!.google.monitoring.v3.Aggregation\x12\x1a\n\x12\x64\x65nominator_filter\x18\t \x01(\t\x12\x43\n\x18\x64\x65nominator_aggregations\x18\n \x03(\x0b\x32!.google.monitoring.v3.Aggregation\x12\x38\n\ncomparison\x18\x04 \x01(\x0e\x32$.google.monitoring.v3.ComparisonType\x12\x17\n\x0fthreshold_value\x18\x05 \x01(\x01\x12+\n\x08\x64uration\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x44\n\x07trigger\x18\x07 \x01(\x0b\x32\x33.google.monitoring.v3.AlertPolicy.Condition.Trigger\x1a\xcb\x01\n\rMetricAbsence\x12\x0e\n\x06\x66ilter\x18\x01 \x01(\t\x12\x37\n\x0c\x61ggregations\x18\x05 \x03(\x0b\x32!.google.monitoring.v3.Aggregation\x12+\n\x08\x64uration\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x44\n\x07trigger\x18\x03 \x01(\x0b\x32\x33.google.monitoring.v3.AlertPolicy.Condition.Trigger:\x97\x02\xea\x41\x93\x02\n.monitoring.googleapis.com/AlertPolicyCondition\x12\x46projects/{project}/alertPolicies/{alert_policy}/conditions/{condition}\x12Porganizations/{organization}/alertPolicies/{alert_policy}/conditions/{condition}\x12\x44\x66olders/{folder}/alertPolicies/{alert_policy}/conditions/{condition}\x12\x01*B\x0b\n\tcondition\x1a\x31\n\x0fUserLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01"a\n\x15\x43onditionCombinerType\x12\x17\n\x13\x43OMBINE_UNSPECIFIED\x10\x00\x12\x07\n\x03\x41ND\x10\x01\x12\x06\n\x02OR\x10\x02\x12\x1e\n\x1a\x41ND_WITH_MATCHING_RESOURCE\x10\x03:\xc9\x01\xea\x41\xc5\x01\n%monitoring.googleapis.com/AlertPolicy\x12/projects/{project}/alertPolicies/{alert_policy}\x12\x39organizations/{organization}/alertPolicies/{alert_policy}\x12-folders/{folder}/alertPolicies/{alert_policy}\x12\x01*B\xc2\x01\n\x18\x63om.google.monitoring.v3B\nAlertProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3\xea\x02\x1dGoogle::Cloud::Monitoring::V3b\x06proto3', - dependencies=[ - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2.DESCRIPTOR, - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_mutation__record__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR, - google_dot_rpc_dot_status__pb2.DESCRIPTOR, - ], -) - - -_ALERTPOLICY_CONDITIONCOMBINERTYPE = _descriptor.EnumDescriptor( - name="ConditionCombinerType", - full_name="google.monitoring.v3.AlertPolicy.ConditionCombinerType", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="COMBINE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="AND", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="OR", index=2, number=2, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="AND_WITH_MATCHING_RESOURCE", - index=3, - number=3, - serialized_options=None, - type=None, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2142, - serialized_end=2239, -) -_sym_db.RegisterEnumDescriptor(_ALERTPOLICY_CONDITIONCOMBINERTYPE) - - -_ALERTPOLICY_DOCUMENTATION = 
_descriptor.Descriptor( - name="Documentation", - full_name="google.monitoring.v3.AlertPolicy.Documentation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="content", - full_name="google.monitoring.v3.AlertPolicy.Documentation.content", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="mime_type", - full_name="google.monitoring.v3.AlertPolicy.Documentation.mime_type", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=865, - serialized_end=916, -) - -_ALERTPOLICY_CONDITION_TRIGGER = _descriptor.Descriptor( - name="Trigger", - full_name="google.monitoring.v3.AlertPolicy.Condition.Trigger", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="count", - full_name="google.monitoring.v3.AlertPolicy.Condition.Trigger.count", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="percent", - full_name="google.monitoring.v3.AlertPolicy.Condition.Trigger.percent", - index=1, - number=2, - type=1, - cpp_type=5, - label=1, - 
has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="type", - full_name="google.monitoring.v3.AlertPolicy.Condition.Trigger.type", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=1147, - serialized_end=1200, -) - -_ALERTPOLICY_CONDITION_METRICTHRESHOLD = _descriptor.Descriptor( - name="MetricThreshold", - full_name="google.monitoring.v3.AlertPolicy.Condition.MetricThreshold", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="filter", - full_name="google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.filter", - index=0, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="aggregations", - full_name="google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.aggregations", - index=1, - number=8, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="denominator_filter", - full_name="google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.denominator_filter", - index=2, - number=9, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="denominator_aggregations", - full_name="google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.denominator_aggregations", - index=3, - number=10, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="comparison", - full_name="google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.comparison", - index=4, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="threshold_value", - full_name="google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.threshold_value", - index=5, - number=5, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="duration", - full_name="google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.duration", - index=6, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="trigger", - full_name="google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.trigger", - index=7, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - 
message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1203, - serialized_end=1588, -) - -_ALERTPOLICY_CONDITION_METRICABSENCE = _descriptor.Descriptor( - name="MetricAbsence", - full_name="google.monitoring.v3.AlertPolicy.Condition.MetricAbsence", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="filter", - full_name="google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.filter", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="aggregations", - full_name="google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.aggregations", - index=1, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="duration", - full_name="google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.duration", - index=2, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="trigger", - full_name="google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.trigger", - index=3, - number=3, - type=11, - cpp_type=10, - 
label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1591, - serialized_end=1794, -) - -_ALERTPOLICY_CONDITION = _descriptor.Descriptor( - name="Condition", - full_name="google.monitoring.v3.AlertPolicy.Condition", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.AlertPolicy.Condition.name", - index=0, - number=12, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.monitoring.v3.AlertPolicy.Condition.display_name", - index=1, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="condition_threshold", - full_name="google.monitoring.v3.AlertPolicy.Condition.condition_threshold", - index=2, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="condition_absent", - full_name="google.monitoring.v3.AlertPolicy.Condition.condition_absent", - index=3, - number=2, - 
type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[ - _ALERTPOLICY_CONDITION_TRIGGER, - _ALERTPOLICY_CONDITION_METRICTHRESHOLD, - _ALERTPOLICY_CONDITION_METRICABSENCE, - ], - enum_types=[], - serialized_options=b"\352A\223\002\n.monitoring.googleapis.com/AlertPolicyCondition\022Fprojects/{project}/alertPolicies/{alert_policy}/conditions/{condition}\022Porganizations/{organization}/alertPolicies/{alert_policy}/conditions/{condition}\022Dfolders/{folder}/alertPolicies/{alert_policy}/conditions/{condition}\022\001*", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="condition", - full_name="google.monitoring.v3.AlertPolicy.Condition.condition", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=919, - serialized_end=2089, -) - -_ALERTPOLICY_USERLABELSENTRY = _descriptor.Descriptor( - name="UserLabelsEntry", - full_name="google.monitoring.v3.AlertPolicy.UserLabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.monitoring.v3.AlertPolicy.UserLabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.monitoring.v3.AlertPolicy.UserLabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2091, - serialized_end=2140, -) - -_ALERTPOLICY = _descriptor.Descriptor( - name="AlertPolicy", - full_name="google.monitoring.v3.AlertPolicy", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.AlertPolicy.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.monitoring.v3.AlertPolicy.display_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="documentation", - full_name="google.monitoring.v3.AlertPolicy.documentation", - index=2, - number=13, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="user_labels", - full_name="google.monitoring.v3.AlertPolicy.user_labels", - index=3, - number=16, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - 
file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="conditions", - full_name="google.monitoring.v3.AlertPolicy.conditions", - index=4, - number=12, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="combiner", - full_name="google.monitoring.v3.AlertPolicy.combiner", - index=5, - number=6, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="enabled", - full_name="google.monitoring.v3.AlertPolicy.enabled", - index=6, - number=17, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="validity", - full_name="google.monitoring.v3.AlertPolicy.validity", - index=7, - number=18, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="notification_channels", - full_name="google.monitoring.v3.AlertPolicy.notification_channels", - index=8, - number=14, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="creation_record", - 
full_name="google.monitoring.v3.AlertPolicy.creation_record", - index=9, - number=10, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="mutation_record", - full_name="google.monitoring.v3.AlertPolicy.mutation_record", - index=10, - number=11, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[ - _ALERTPOLICY_DOCUMENTATION, - _ALERTPOLICY_CONDITION, - _ALERTPOLICY_USERLABELSENTRY, - ], - enum_types=[_ALERTPOLICY_CONDITIONCOMBINERTYPE], - serialized_options=b"\352A\305\001\n%monitoring.googleapis.com/AlertPolicy\022/projects/{project}/alertPolicies/{alert_policy}\0229organizations/{organization}/alertPolicies/{alert_policy}\022-folders/{folder}/alertPolicies/{alert_policy}\022\001*", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=290, - serialized_end=2443, -) - -_ALERTPOLICY_DOCUMENTATION.containing_type = _ALERTPOLICY -_ALERTPOLICY_CONDITION_TRIGGER.containing_type = _ALERTPOLICY_CONDITION -_ALERTPOLICY_CONDITION_TRIGGER.oneofs_by_name["type"].fields.append( - _ALERTPOLICY_CONDITION_TRIGGER.fields_by_name["count"] -) -_ALERTPOLICY_CONDITION_TRIGGER.fields_by_name[ - "count" -].containing_oneof = _ALERTPOLICY_CONDITION_TRIGGER.oneofs_by_name["type"] -_ALERTPOLICY_CONDITION_TRIGGER.oneofs_by_name["type"].fields.append( - _ALERTPOLICY_CONDITION_TRIGGER.fields_by_name["percent"] -) -_ALERTPOLICY_CONDITION_TRIGGER.fields_by_name[ - "percent" -].containing_oneof = _ALERTPOLICY_CONDITION_TRIGGER.oneofs_by_name["type"] 
-_ALERTPOLICY_CONDITION_METRICTHRESHOLD.fields_by_name[ - "aggregations" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2._AGGREGATION -) -_ALERTPOLICY_CONDITION_METRICTHRESHOLD.fields_by_name[ - "denominator_aggregations" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2._AGGREGATION -) -_ALERTPOLICY_CONDITION_METRICTHRESHOLD.fields_by_name[ - "comparison" -].enum_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2._COMPARISONTYPE -) -_ALERTPOLICY_CONDITION_METRICTHRESHOLD.fields_by_name[ - "duration" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_ALERTPOLICY_CONDITION_METRICTHRESHOLD.fields_by_name[ - "trigger" -].message_type = _ALERTPOLICY_CONDITION_TRIGGER -_ALERTPOLICY_CONDITION_METRICTHRESHOLD.containing_type = _ALERTPOLICY_CONDITION -_ALERTPOLICY_CONDITION_METRICABSENCE.fields_by_name[ - "aggregations" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2._AGGREGATION -) -_ALERTPOLICY_CONDITION_METRICABSENCE.fields_by_name[ - "duration" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_ALERTPOLICY_CONDITION_METRICABSENCE.fields_by_name[ - "trigger" -].message_type = _ALERTPOLICY_CONDITION_TRIGGER -_ALERTPOLICY_CONDITION_METRICABSENCE.containing_type = _ALERTPOLICY_CONDITION -_ALERTPOLICY_CONDITION.fields_by_name[ - "condition_threshold" -].message_type = _ALERTPOLICY_CONDITION_METRICTHRESHOLD -_ALERTPOLICY_CONDITION.fields_by_name[ - "condition_absent" -].message_type = _ALERTPOLICY_CONDITION_METRICABSENCE -_ALERTPOLICY_CONDITION.containing_type = _ALERTPOLICY -_ALERTPOLICY_CONDITION.oneofs_by_name["condition"].fields.append( - _ALERTPOLICY_CONDITION.fields_by_name["condition_threshold"] -) -_ALERTPOLICY_CONDITION.fields_by_name[ - "condition_threshold" -].containing_oneof = _ALERTPOLICY_CONDITION.oneofs_by_name["condition"] -_ALERTPOLICY_CONDITION.oneofs_by_name["condition"].fields.append( - 
_ALERTPOLICY_CONDITION.fields_by_name["condition_absent"] -) -_ALERTPOLICY_CONDITION.fields_by_name[ - "condition_absent" -].containing_oneof = _ALERTPOLICY_CONDITION.oneofs_by_name["condition"] -_ALERTPOLICY_USERLABELSENTRY.containing_type = _ALERTPOLICY -_ALERTPOLICY.fields_by_name["documentation"].message_type = _ALERTPOLICY_DOCUMENTATION -_ALERTPOLICY.fields_by_name["user_labels"].message_type = _ALERTPOLICY_USERLABELSENTRY -_ALERTPOLICY.fields_by_name["conditions"].message_type = _ALERTPOLICY_CONDITION -_ALERTPOLICY.fields_by_name["combiner"].enum_type = _ALERTPOLICY_CONDITIONCOMBINERTYPE -_ALERTPOLICY.fields_by_name[ - "enabled" -].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE -_ALERTPOLICY.fields_by_name[ - "validity" -].message_type = google_dot_rpc_dot_status__pb2._STATUS -_ALERTPOLICY.fields_by_name[ - "creation_record" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_mutation__record__pb2._MUTATIONRECORD -) -_ALERTPOLICY.fields_by_name[ - "mutation_record" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_mutation__record__pb2._MUTATIONRECORD -) -_ALERTPOLICY_CONDITIONCOMBINERTYPE.containing_type = _ALERTPOLICY -DESCRIPTOR.message_types_by_name["AlertPolicy"] = _ALERTPOLICY -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -AlertPolicy = _reflection.GeneratedProtocolMessageType( - "AlertPolicy", - (_message.Message,), - { - "Documentation": _reflection.GeneratedProtocolMessageType( - "Documentation", - (_message.Message,), - { - "DESCRIPTOR": _ALERTPOLICY_DOCUMENTATION, - "__module__": "google.cloud.monitoring_v3.proto.alert_pb2", - "__doc__": """A content string and a MIME type that describes the - content string’s format. - - - Attributes: - content: - The text of the documentation, interpreted according to - ``mime_type``. The content may not exceed 8,192 Unicode - characters and may not exceed more than 10,240 bytes when - encoded in UTF-8 format, whichever is smaller. 
- mime_type: - The format of the ``content`` field. Presently, only the value - ``"text/markdown"`` is supported. See `Markdown - `__ for more - information. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.AlertPolicy.Documentation) - }, - ), - "Condition": _reflection.GeneratedProtocolMessageType( - "Condition", - (_message.Message,), - { - "Trigger": _reflection.GeneratedProtocolMessageType( - "Trigger", - (_message.Message,), - { - "DESCRIPTOR": _ALERTPOLICY_CONDITION_TRIGGER, - "__module__": "google.cloud.monitoring_v3.proto.alert_pb2", - "__doc__": """Specifies how many time series must fail a predicate to - trigger a condition. If not specified, then a ``{count: 1}`` trigger is - used. - - - Attributes: - type: - A type of trigger. - count: - The absolute number of time series that must fail the - predicate for the condition to be triggered. - percent: - The percentage of time series that must fail the predicate for - the condition to be triggered. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.AlertPolicy.Condition.Trigger) - }, - ), - "MetricThreshold": _reflection.GeneratedProtocolMessageType( - "MetricThreshold", - (_message.Message,), - { - "DESCRIPTOR": _ALERTPOLICY_CONDITION_METRICTHRESHOLD, - "__module__": "google.cloud.monitoring_v3.proto.alert_pb2", - "__doc__": """A condition type that compares a collection of time series - against a threshold. - - - Attributes: - filter: - A `filter - `__ that - identifies which time series should be compared with the - threshold. The filter is similar to the one that is specified - in the ```ListTimeSeries`` request `__ - (that call is useful to verify the time series that will be - retrieved / processed) and must specify the metric type and - optionally may contain restrictions on resource type, resource - labels, and metric labels. This field may not exceed 2048 - Unicode characters in length. 
- aggregations: - Specifies the alignment of data points in individual time - series as well as how to combine the retrieved time series - together (such as when aggregating multiple streams on each - resource to a single stream for each resource or when - aggregating streams across all members of a group of - resrouces). Multiple aggregations are applied in the order - specified. This field is similar to the one in the - ```ListTimeSeries`` request `__. It is - advisable to use the ``ListTimeSeries`` method when debugging - this field. - denominator_filter: - A `filter - `__ that - identifies a time series that should be used as the - denominator of a ratio that will be compared with the - threshold. If a ``denominator_filter`` is specified, the time - series specified by the ``filter`` field will be used as the - numerator. The filter must specify the metric type and - optionally may contain restrictions on resource type, resource - labels, and metric labels. This field may not exceed 2048 - Unicode characters in length. - denominator_aggregations: - Specifies the alignment of data points in individual time - series selected by ``denominatorFilter`` as well as how to - combine the retrieved time series together (such as when - aggregating multiple streams on each resource to a single - stream for each resource or when aggregating streams across - all members of a group of resources). When computing ratios, - the ``aggregations`` and ``denominator_aggregations`` fields - must use the same alignment period and produce time series - that have the same periodicity and labels. - comparison: - The comparison to apply between the time series (indicated by - ``filter`` and ``aggregation``) and the threshold (indicated - by ``threshold_value``). The comparison is applied on each - time series, with the time series on the left-hand side and - the threshold on the right-hand side. Only ``COMPARISON_LT`` - and ``COMPARISON_GT`` are supported currently. 
- threshold_value: - A value against which to compare the time series. - duration: - The amount of time that a time series must violate the - threshold to be considered failing. Currently, only values - that are a multiple of a minute–e.g., 0, 60, 120, or 300 - seconds–are supported. If an invalid value is given, an error - will be returned. When choosing a duration, it is useful to - keep in mind the frequency of the underlying time series data - (which may also be affected by any alignments specified in the - ``aggregations`` field); a good duration is long enough so - that a single outlier does not generate spurious alerts, but - short enough that unhealthy states are detected and alerted on - quickly. - trigger: - The number/percent of time series for which the comparison - must hold in order for the condition to trigger. If - unspecified, then the condition will trigger if the comparison - is true for any of the time series that have been identified - by ``filter`` and ``aggregations``, or by the ratio, if - ``denominator_filter`` and ``denominator_aggregations`` are - specified. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.AlertPolicy.Condition.MetricThreshold) - }, - ), - "MetricAbsence": _reflection.GeneratedProtocolMessageType( - "MetricAbsence", - (_message.Message,), - { - "DESCRIPTOR": _ALERTPOLICY_CONDITION_METRICABSENCE, - "__module__": "google.cloud.monitoring_v3.proto.alert_pb2", - "__doc__": """A condition type that checks that monitored resources are - reporting data. The configuration defines a metric and a set of - monitored resources. The predicate is considered in violation when a - time series for the specified metric of a monitored resource does not - include any data in the specified ``duration``. - - - Attributes: - filter: - A `filter - `__ that - identifies which time series should be compared with the - threshold. 
The filter is similar to the one that is specified - in the ```ListTimeSeries`` request `__ - (that call is useful to verify the time series that will be - retrieved / processed) and must specify the metric type and - optionally may contain restrictions on resource type, resource - labels, and metric labels. This field may not exceed 2048 - Unicode characters in length. - aggregations: - Specifies the alignment of data points in individual time - series as well as how to combine the retrieved time series - together (such as when aggregating multiple streams on each - resource to a single stream for each resource or when - aggregating streams across all members of a group of - resrouces). Multiple aggregations are applied in the order - specified. This field is similar to the one in the - ```ListTimeSeries`` request `__. It is - advisable to use the ``ListTimeSeries`` method when debugging - this field. - duration: - The amount of time that a time series must fail to report new - data to be considered failing. Currently, only values that are - a multiple of a minute–e.g. 60, 120, or 300 seconds–are - supported. If an invalid value is given, an error will be - returned. The ``Duration.nanos`` field is ignored. - trigger: - The number/percent of time series for which the comparison - must hold in order for the condition to trigger. If - unspecified, then the condition will trigger if the comparison - is true for any of the time series that have been identified - by ``filter`` and ``aggregations``. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.AlertPolicy.Condition.MetricAbsence) - }, - ), - "DESCRIPTOR": _ALERTPOLICY_CONDITION, - "__module__": "google.cloud.monitoring_v3.proto.alert_pb2", - "__doc__": """A condition is a true/false test that determines when an - alerting policy should open an incident. If a condition evaluates to - true, it signifies that something is wrong. - - - Attributes: - name: - Required if the condition exists. 
The unique resource name for - this condition. Its format is: :: projects/[PROJECT_ID_OR - _NUMBER]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] - ``[CONDITION_ID]`` is assigned by Stackdriver Monitoring when - the condition is created as part of a new or updated alerting - policy. When calling the [alertPolicies.create][google.monito - ring.v3.AlertPolicyService.CreateAlertPolicy] method, do not - include the ``name`` field in the conditions of the requested - alerting policy. Stackdriver Monitoring creates the condition - identifiers and includes them in the new policy. When calling - the [alertPolicies.update][google.monitoring.v3.AlertPolicySer - vice.UpdateAlertPolicy] method to update a policy, including a - condition ``name`` causes the existing condition to be - updated. Conditions without names are added to the updated - policy. Existing conditions are deleted if they are not - updated. Best practice is to preserve ``[CONDITION_ID]`` if - you make only small changes, such as those to condition - thresholds, durations, or trigger values. Otherwise, treat the - change as a new condition and let the existing condition be - deleted. - display_name: - A short name or phrase used to identify the condition in - dashboards, notifications, and incidents. To avoid confusion, - don’t use the same display name for multiple conditions in the - same policy. - condition: - Only one of the following condition types will be specified. - condition_threshold: - A condition that compares a time series against a threshold. - condition_absent: - A condition that checks that a time series continues to - receive new data points. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.AlertPolicy.Condition) - }, - ), - "UserLabelsEntry": _reflection.GeneratedProtocolMessageType( - "UserLabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _ALERTPOLICY_USERLABELSENTRY, - "__module__": "google.cloud.monitoring_v3.proto.alert_pb2" - # @@protoc_insertion_point(class_scope:google.monitoring.v3.AlertPolicy.UserLabelsEntry) - }, - ), - "DESCRIPTOR": _ALERTPOLICY, - "__module__": "google.cloud.monitoring_v3.proto.alert_pb2", - "__doc__": """A description of the conditions under which some aspect of - your system is considered to be “unhealthy” and the ways to notify - people or services about this state. For an overview of alert policies, - see `Introduction to - Alerting `__. - - - Attributes: - name: - Required if the policy exists. The resource name for this - policy. The format is: :: projects/[PROJECT_ID_OR_NUMBER] - /alertPolicies/[ALERT_POLICY_ID] ``[ALERT_POLICY_ID]`` is - assigned by Stackdriver Monitoring when the policy is created. - When calling the [alertPolicies.create][google.monitoring.v3.A - lertPolicyService.CreateAlertPolicy] method, do not include - the ``name`` field in the alerting policy passed as part of - the request. - display_name: - A short name or phrase used to identify the policy in - dashboards, notifications, and incidents. To avoid confusion, - don’t use the same display name for multiple policies in the - same project. The name is limited to 512 Unicode characters. - documentation: - Documentation that is included with notifications and - incidents related to this policy. Best practice is for the - documentation to include information to help responders - understand, mitigate, escalate, and correct the underlying - problems detected by the alerting policy. Notification - channels that have limited capacity might not show this - documentation. 
- user_labels: - User-supplied key/value data to be used for organizing and - identifying the ``AlertPolicy`` objects. The field can - contain up to 64 entries. Each key and value is limited to 63 - Unicode characters or 128 bytes, whichever is smaller. Labels - and values can contain only lowercase letters, numerals, - underscores, and dashes. Keys must begin with a letter. - conditions: - A list of conditions for the policy. The conditions are - combined by AND or OR according to the ``combiner`` field. If - the combined conditions evaluate to true, then an incident is - created. A policy can have from one to six conditions. If - ``condition_time_series_query_language`` is present, it must - be the only ``condition``. - combiner: - How to combine the results of multiple conditions to determine - if an incident should be opened. If - ``condition_time_series_query_language`` is present, this must - be ``COMBINE_UNSPECIFIED``. - enabled: - Whether or not the policy is enabled. On write, the default - interpretation if unset is that the policy is enabled. On - read, clients should not make any assumption about the state - if it has not been populated. The field should always be - populated on List and Get operations, unless a field - projection has been specified that strips it out. - validity: - Read-only description of how the alert policy is invalid. OK - if the alert policy is valid. If not OK, the alert policy will - not generate incidents. - notification_channels: - Identifies the notification channels to which notifications - should be sent when incidents are opened or closed or when new - violations occur on an already opened incident. Each element - of this array corresponds to the ``name`` field in each of the - [``NotificationChannel``][google.monitoring.v3.NotificationCha - nnel] objects that are returned from the - [``ListNotificationChannels``] [google.monitoring.v3.Notificat - ionChannelService.ListNotificationChannels] method. 
The format - of the entries in this field is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] - creation_record: - A read-only record of the creation of the alerting policy. If - provided in a call to create or update, this field will be - ignored. - mutation_record: - A read-only record of the most recent change to the alerting - policy. If provided in a call to create or update, this field - will be ignored. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.AlertPolicy) - }, -) -_sym_db.RegisterMessage(AlertPolicy) -_sym_db.RegisterMessage(AlertPolicy.Documentation) -_sym_db.RegisterMessage(AlertPolicy.Condition) -_sym_db.RegisterMessage(AlertPolicy.Condition.Trigger) -_sym_db.RegisterMessage(AlertPolicy.Condition.MetricThreshold) -_sym_db.RegisterMessage(AlertPolicy.Condition.MetricAbsence) -_sym_db.RegisterMessage(AlertPolicy.UserLabelsEntry) - - -DESCRIPTOR._options = None -_ALERTPOLICY_CONDITION._options = None -_ALERTPOLICY_USERLABELSENTRY._options = None -_ALERTPOLICY._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/monitoring_v3/proto/alert_pb2_grpc.py b/google/cloud/monitoring_v3/proto/alert_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/monitoring_v3/proto/alert_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/monitoring_v3/proto/alert_service_pb2.py b/google/cloud/monitoring_v3/proto/alert_service_pb2.py deleted file mode 100644 index ea86a07f..00000000 --- a/google/cloud/monitoring_v3/proto/alert_service_pb2.py +++ /dev/null @@ -1,681 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/monitoring_v3/proto/alert_service.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.monitoring_v3.proto import ( - alert_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/monitoring_v3/proto/alert_service.proto", - package="google.monitoring.v3", - syntax="proto3", - serialized_options=b"\n\030com.google.monitoring.v3B\021AlertServiceProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3\352\002\035Google::Cloud::Monitoring::V3", - serialized_pb=b'\n4google/cloud/monitoring_v3/proto/alert_service.proto\x12\x14google.monitoring.v3\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a,google/cloud/monitoring_v3/proto/alert.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto"\x95\x01\n\x18\x43reateAlertPolicyRequest\x12;\n\x04name\x18\x03 \x01(\tB-\xe0\x41\x02\xfa\x41\'\x12%monitoring.googleapis.com/AlertPolicy\x12<\n\x0c\x61lert_policy\x18\x02 
\x01(\x0b\x32!.google.monitoring.v3.AlertPolicyB\x03\xe0\x41\x02"T\n\x15GetAlertPolicyRequest\x12;\n\x04name\x18\x03 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%monitoring.googleapis.com/AlertPolicy"\xa0\x01\n\x18ListAlertPoliciesRequest\x12;\n\x04name\x18\x04 \x01(\tB-\xe0\x41\x02\xfa\x41\'\x12%monitoring.googleapis.com/AlertPolicy\x12\x0e\n\x06\x66ilter\x18\x05 \x01(\t\x12\x10\n\x08order_by\x18\x06 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"o\n\x19ListAlertPoliciesResponse\x12\x39\n\x0e\x61lert_policies\x18\x03 \x03(\x0b\x32!.google.monitoring.v3.AlertPolicy\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x89\x01\n\x18UpdateAlertPolicyRequest\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12<\n\x0c\x61lert_policy\x18\x03 \x01(\x0b\x32!.google.monitoring.v3.AlertPolicyB\x03\xe0\x41\x02"W\n\x18\x44\x65leteAlertPolicyRequest\x12;\n\x04name\x18\x03 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%monitoring.googleapis.com/AlertPolicy2\x9e\x08\n\x12\x41lertPolicyService\x12\xa8\x01\n\x11ListAlertPolicies\x12..google.monitoring.v3.ListAlertPoliciesRequest\x1a/.google.monitoring.v3.ListAlertPoliciesResponse"2\x82\xd3\xe4\x93\x02%\x12#/v3/{name=projects/*}/alertPolicies\xda\x41\x04name\x12\x96\x01\n\x0eGetAlertPolicy\x12+.google.monitoring.v3.GetAlertPolicyRequest\x1a!.google.monitoring.v3.AlertPolicy"4\x82\xd3\xe4\x93\x02\'\x12%/v3/{name=projects/*/alertPolicies/*}\xda\x41\x04name\x12\xb5\x01\n\x11\x43reateAlertPolicy\x12..google.monitoring.v3.CreateAlertPolicyRequest\x1a!.google.monitoring.v3.AlertPolicy"M\x82\xd3\xe4\x93\x02\x33"#/v3/{name=projects/*}/alertPolicies:\x0c\x61lert_policy\xda\x41\x11name,alert_policy\x12\x91\x01\n\x11\x44\x65leteAlertPolicy\x12..google.monitoring.v3.DeleteAlertPolicyRequest\x1a\x16.google.protobuf.Empty"4\x82\xd3\xe4\x93\x02\'*%/v3/{name=projects/*/alertPolicies/*}\xda\x41\x04name\x12\xcb\x01\n\x11UpdateAlertPolicy\x12..google.monitoring.v3.UpdateAlertPolicyRequest\x1a!.google.monitoring.v3.AlertP
olicy"c\x82\xd3\xe4\x93\x02\x42\x32\x32/v3/{alert_policy.name=projects/*/alertPolicies/*}:\x0c\x61lert_policy\xda\x41\x18update_mask,alert_policy\x1a\xa9\x01\xca\x41\x19monitoring.googleapis.com\xd2\x41\x89\x01https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/monitoring,https://www.googleapis.com/auth/monitoring.readB\xc9\x01\n\x18\x63om.google.monitoring.v3B\x11\x41lertServiceProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3\xea\x02\x1dGoogle::Cloud::Monitoring::V3b\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - ], -) - - -_CREATEALERTPOLICYREQUEST = _descriptor.Descriptor( - name="CreateAlertPolicyRequest", - full_name="google.monitoring.v3.CreateAlertPolicyRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.CreateAlertPolicyRequest.name", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A'\022%monitoring.googleapis.com/AlertPolicy", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="alert_policy", - full_name="google.monitoring.v3.CreateAlertPolicyRequest.alert_policy", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=303, - serialized_end=452, -) - - -_GETALERTPOLICYREQUEST = _descriptor.Descriptor( - name="GetAlertPolicyRequest", - full_name="google.monitoring.v3.GetAlertPolicyRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.GetAlertPolicyRequest.name", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A'\n%monitoring.googleapis.com/AlertPolicy", - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=454, - serialized_end=538, -) - - -_LISTALERTPOLICIESREQUEST = _descriptor.Descriptor( - name="ListAlertPoliciesRequest", - full_name="google.monitoring.v3.ListAlertPoliciesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.ListAlertPoliciesRequest.name", - index=0, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A'\022%monitoring.googleapis.com/AlertPolicy", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.monitoring.v3.ListAlertPoliciesRequest.filter", - index=1, - number=5, 
- type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="order_by", - full_name="google.monitoring.v3.ListAlertPoliciesRequest.order_by", - index=2, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.monitoring.v3.ListAlertPoliciesRequest.page_size", - index=3, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.monitoring.v3.ListAlertPoliciesRequest.page_token", - index=4, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=541, - serialized_end=701, -) - - -_LISTALERTPOLICIESRESPONSE = _descriptor.Descriptor( - name="ListAlertPoliciesResponse", - full_name="google.monitoring.v3.ListAlertPoliciesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="alert_policies", - 
full_name="google.monitoring.v3.ListAlertPoliciesResponse.alert_policies", - index=0, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.monitoring.v3.ListAlertPoliciesResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=703, - serialized_end=814, -) - - -_UPDATEALERTPOLICYREQUEST = _descriptor.Descriptor( - name="UpdateAlertPolicyRequest", - full_name="google.monitoring.v3.UpdateAlertPolicyRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.monitoring.v3.UpdateAlertPolicyRequest.update_mask", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="alert_policy", - full_name="google.monitoring.v3.UpdateAlertPolicyRequest.alert_policy", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - ], 
- extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=817, - serialized_end=954, -) - - -_DELETEALERTPOLICYREQUEST = _descriptor.Descriptor( - name="DeleteAlertPolicyRequest", - full_name="google.monitoring.v3.DeleteAlertPolicyRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.DeleteAlertPolicyRequest.name", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A'\n%monitoring.googleapis.com/AlertPolicy", - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=956, - serialized_end=1043, -) - -_CREATEALERTPOLICYREQUEST.fields_by_name[ - "alert_policy" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2._ALERTPOLICY -) -_LISTALERTPOLICIESRESPONSE.fields_by_name[ - "alert_policies" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2._ALERTPOLICY -) -_UPDATEALERTPOLICYREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_UPDATEALERTPOLICYREQUEST.fields_by_name[ - "alert_policy" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2._ALERTPOLICY -) -DESCRIPTOR.message_types_by_name["CreateAlertPolicyRequest"] = _CREATEALERTPOLICYREQUEST -DESCRIPTOR.message_types_by_name["GetAlertPolicyRequest"] = _GETALERTPOLICYREQUEST -DESCRIPTOR.message_types_by_name["ListAlertPoliciesRequest"] = _LISTALERTPOLICIESREQUEST 
-DESCRIPTOR.message_types_by_name[ - "ListAlertPoliciesResponse" -] = _LISTALERTPOLICIESRESPONSE -DESCRIPTOR.message_types_by_name["UpdateAlertPolicyRequest"] = _UPDATEALERTPOLICYREQUEST -DESCRIPTOR.message_types_by_name["DeleteAlertPolicyRequest"] = _DELETEALERTPOLICYREQUEST -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -CreateAlertPolicyRequest = _reflection.GeneratedProtocolMessageType( - "CreateAlertPolicyRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEALERTPOLICYREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.alert_service_pb2", - "__doc__": """The protocol for the ``CreateAlertPolicy`` request. - - - Attributes: - name: - Required. The project in which to create the alerting policy. - The format is: :: projects/[PROJECT_ID_OR_NUMBER] Note - that this field names the parent container in which the - alerting policy will be written, not the name of the created - policy. The alerting policy that is returned will have a name - that contains a normalized representation of this name as a - prefix but adds a suffix of the form - ``/alertPolicies/[ALERT_POLICY_ID]``, identifying the policy - in the container. - alert_policy: - Required. The requested alerting policy. You should omit the - ``name`` field in this policy. The name will be returned in - the new policy, including a new ``[ALERT_POLICY_ID]`` value. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.CreateAlertPolicyRequest) - }, -) -_sym_db.RegisterMessage(CreateAlertPolicyRequest) - -GetAlertPolicyRequest = _reflection.GeneratedProtocolMessageType( - "GetAlertPolicyRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETALERTPOLICYREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.alert_service_pb2", - "__doc__": """The protocol for the ``GetAlertPolicy`` request. - - - Attributes: - name: - Required. The alerting policy to retrieve. 
The format is: :: - projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID - ] - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.GetAlertPolicyRequest) - }, -) -_sym_db.RegisterMessage(GetAlertPolicyRequest) - -ListAlertPoliciesRequest = _reflection.GeneratedProtocolMessageType( - "ListAlertPoliciesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTALERTPOLICIESREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.alert_service_pb2", - "__doc__": """The protocol for the ``ListAlertPolicies`` request. - - - Attributes: - name: - Required. The project whose alert policies are to be listed. - The format is: :: projects/[PROJECT_ID_OR_NUMBER] Note - that this field names the parent container in which the - alerting policies to be listed are stored. To retrieve a - single alerting policy by name, use the [GetAlertPolicy][googl - e.monitoring.v3.AlertPolicyService.GetAlertPolicy] operation, - instead. - filter: - If provided, this field specifies the criteria that must be - met by alert policies to be included in the response. For - more details, see `sorting and filtering - `__. - order_by: - A comma-separated list of fields by which to sort the result. - Supports the same set of field references as the ``filter`` - field. Entries can be prefixed with a minus sign to sort by - the field in descending order. For more details, see `sorting - and filtering - `__. - page_size: - The maximum number of results to return in a single response. - page_token: - If this field is not empty then it must contain the - ``nextPageToken`` value returned by a previous call to this - method. Using this field causes the method to return more - results from the previous method call. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListAlertPoliciesRequest) - }, -) -_sym_db.RegisterMessage(ListAlertPoliciesRequest) - -ListAlertPoliciesResponse = _reflection.GeneratedProtocolMessageType( - "ListAlertPoliciesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTALERTPOLICIESRESPONSE, - "__module__": "google.cloud.monitoring_v3.proto.alert_service_pb2", - "__doc__": """The protocol for the ``ListAlertPolicies`` response. - - - Attributes: - alert_policies: - The returned alert policies. - next_page_token: - If there might be more results than were returned, then this - field is set to a non-empty value. To see the additional - results, use that value as ``page_token`` in the next call to - this method. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListAlertPoliciesResponse) - }, -) -_sym_db.RegisterMessage(ListAlertPoliciesResponse) - -UpdateAlertPolicyRequest = _reflection.GeneratedProtocolMessageType( - "UpdateAlertPolicyRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEALERTPOLICYREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.alert_service_pb2", - "__doc__": """The protocol for the ``UpdateAlertPolicy`` request. - - - Attributes: - update_mask: - Optional. A list of alerting policy field names. If this field - is not empty, each listed field in the existing alerting - policy is set to the value of the corresponding field in the - supplied policy (``alert_policy``), or to the field’s default - value if the field is not in the supplied alerting policy. - Fields not listed retain their previous value. Examples of - valid field masks include ``display_name``, ``documentation``, - ``documentation.content``, ``documentation.mime_type``, - ``user_labels``, ``user_label.nameofkey``, ``enabled``, - ``conditions``, ``combiner``, etc. If this field is empty, - then the supplied alerting policy replaces the existing - policy. 
It is the same as deleting the existing policy and - adding the supplied policy, except for the following: - The - new policy will have the same ``[ALERT_POLICY_ID]`` as the - former policy. This gives you continuity with the former - policy in your notifications and incidents. - Conditions - in the new policy will keep their former ``[CONDITION_ID]`` - if the supplied condition includes the ``name`` field with - that ``[CONDITION_ID]``. If the supplied condition omits - the ``name`` field, then a new ``[CONDITION_ID]`` is created. - alert_policy: - Required. The updated alerting policy or the updated values - for the fields listed in ``update_mask``. If ``update_mask`` - is not empty, any fields in this policy that are not in - ``update_mask`` are ignored. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.UpdateAlertPolicyRequest) - }, -) -_sym_db.RegisterMessage(UpdateAlertPolicyRequest) - -DeleteAlertPolicyRequest = _reflection.GeneratedProtocolMessageType( - "DeleteAlertPolicyRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEALERTPOLICYREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.alert_service_pb2", - "__doc__": """The protocol for the ``DeleteAlertPolicy`` request. - - - Attributes: - name: - Required. The alerting policy to delete. The format is: :: - projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID - ] For more information, see - [AlertPolicy][google.monitoring.v3.AlertPolicy]. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.DeleteAlertPolicyRequest) - }, -) -_sym_db.RegisterMessage(DeleteAlertPolicyRequest) - - -DESCRIPTOR._options = None -_CREATEALERTPOLICYREQUEST.fields_by_name["name"]._options = None -_CREATEALERTPOLICYREQUEST.fields_by_name["alert_policy"]._options = None -_GETALERTPOLICYREQUEST.fields_by_name["name"]._options = None -_LISTALERTPOLICIESREQUEST.fields_by_name["name"]._options = None -_UPDATEALERTPOLICYREQUEST.fields_by_name["alert_policy"]._options = None -_DELETEALERTPOLICYREQUEST.fields_by_name["name"]._options = None - -_ALERTPOLICYSERVICE = _descriptor.ServiceDescriptor( - name="AlertPolicyService", - full_name="google.monitoring.v3.AlertPolicyService", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\031monitoring.googleapis.com\322A\211\001https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/monitoring,https://www.googleapis.com/auth/monitoring.read", - serialized_start=1046, - serialized_end=2100, - methods=[ - _descriptor.MethodDescriptor( - name="ListAlertPolicies", - full_name="google.monitoring.v3.AlertPolicyService.ListAlertPolicies", - index=0, - containing_service=None, - input_type=_LISTALERTPOLICIESREQUEST, - output_type=_LISTALERTPOLICIESRESPONSE, - serialized_options=b"\202\323\344\223\002%\022#/v3/{name=projects/*}/alertPolicies\332A\004name", - ), - _descriptor.MethodDescriptor( - name="GetAlertPolicy", - full_name="google.monitoring.v3.AlertPolicyService.GetAlertPolicy", - index=1, - containing_service=None, - input_type=_GETALERTPOLICYREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2._ALERTPOLICY, - serialized_options=b"\202\323\344\223\002'\022%/v3/{name=projects/*/alertPolicies/*}\332A\004name", - ), - _descriptor.MethodDescriptor( - name="CreateAlertPolicy", - full_name="google.monitoring.v3.AlertPolicyService.CreateAlertPolicy", - index=2, - containing_service=None, - 
input_type=_CREATEALERTPOLICYREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2._ALERTPOLICY, - serialized_options=b'\202\323\344\223\0023"#/v3/{name=projects/*}/alertPolicies:\014alert_policy\332A\021name,alert_policy', - ), - _descriptor.MethodDescriptor( - name="DeleteAlertPolicy", - full_name="google.monitoring.v3.AlertPolicyService.DeleteAlertPolicy", - index=3, - containing_service=None, - input_type=_DELETEALERTPOLICYREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002'*%/v3/{name=projects/*/alertPolicies/*}\332A\004name", - ), - _descriptor.MethodDescriptor( - name="UpdateAlertPolicy", - full_name="google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy", - index=4, - containing_service=None, - input_type=_UPDATEALERTPOLICYREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2._ALERTPOLICY, - serialized_options=b"\202\323\344\223\002B22/v3/{alert_policy.name=projects/*/alertPolicies/*}:\014alert_policy\332A\030update_mask,alert_policy", - ), - ], -) -_sym_db.RegisterServiceDescriptor(_ALERTPOLICYSERVICE) - -DESCRIPTOR.services_by_name["AlertPolicyService"] = _ALERTPOLICYSERVICE - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/monitoring_v3/proto/alert_service_pb2_grpc.py b/google/cloud/monitoring_v3/proto/alert_service_pb2_grpc.py deleted file mode 100644 index d5773d78..00000000 --- a/google/cloud/monitoring_v3/proto/alert_service_pb2_grpc.py +++ /dev/null @@ -1,140 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-import grpc - -from google.cloud.monitoring_v3.proto import ( - alert_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2, -) -from google.cloud.monitoring_v3.proto import ( - alert_service_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class AlertPolicyServiceStub(object): - """The AlertPolicyService API is used to manage (list, create, delete, - edit) alert policies in Stackdriver Monitoring. An alerting policy is - a description of the conditions under which some aspect of your - system is considered to be "unhealthy" and the ways to notify - people or services about this state. In addition to using this API, alert - policies can also be managed through - [Stackdriver Monitoring](https://cloud.google.com/monitoring/docs/), - which can be reached by clicking the "Monitoring" tab in - [Cloud Console](https://console.cloud.google.com/). - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.ListAlertPolicies = channel.unary_unary( - "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.ListAlertPoliciesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.ListAlertPoliciesResponse.FromString, - ) - self.GetAlertPolicy = channel.unary_unary( - "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.GetAlertPolicyRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2.AlertPolicy.FromString, - ) - self.CreateAlertPolicy = channel.unary_unary( - "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.CreateAlertPolicyRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2.AlertPolicy.FromString, - ) - self.DeleteAlertPolicy = channel.unary_unary( - "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.DeleteAlertPolicyRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.UpdateAlertPolicy = channel.unary_unary( - "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.UpdateAlertPolicyRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2.AlertPolicy.FromString, - ) - - -class AlertPolicyServiceServicer(object): - """The AlertPolicyService API is used to manage (list, create, delete, - edit) alert policies in Stackdriver Monitoring. 
An alerting policy is - a description of the conditions under which some aspect of your - system is considered to be "unhealthy" and the ways to notify - people or services about this state. In addition to using this API, alert - policies can also be managed through - [Stackdriver Monitoring](https://cloud.google.com/monitoring/docs/), - which can be reached by clicking the "Monitoring" tab in - [Cloud Console](https://console.cloud.google.com/). - """ - - def ListAlertPolicies(self, request, context): - """Lists the existing alerting policies for the project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetAlertPolicy(self, request, context): - """Gets a single alerting policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateAlertPolicy(self, request, context): - """Creates a new alerting policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteAlertPolicy(self, request, context): - """Deletes an alerting policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateAlertPolicy(self, request, context): - """Updates an alerting policy. You can either replace the entire policy with - a new one or replace only certain fields in the current alerting policy by - specifying the fields to be updated via `updateMask`. Returns the - updated alerting policy. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_AlertPolicyServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - "ListAlertPolicies": grpc.unary_unary_rpc_method_handler( - servicer.ListAlertPolicies, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.ListAlertPoliciesRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.ListAlertPoliciesResponse.SerializeToString, - ), - "GetAlertPolicy": grpc.unary_unary_rpc_method_handler( - servicer.GetAlertPolicy, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.GetAlertPolicyRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2.AlertPolicy.SerializeToString, - ), - "CreateAlertPolicy": grpc.unary_unary_rpc_method_handler( - servicer.CreateAlertPolicy, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.CreateAlertPolicyRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2.AlertPolicy.SerializeToString, - ), - "DeleteAlertPolicy": grpc.unary_unary_rpc_method_handler( - servicer.DeleteAlertPolicy, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.DeleteAlertPolicyRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "UpdateAlertPolicy": grpc.unary_unary_rpc_method_handler( - servicer.UpdateAlertPolicy, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.UpdateAlertPolicyRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2.AlertPolicy.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 
"google.monitoring.v3.AlertPolicyService", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/google/cloud/monitoring_v3/proto/common.proto b/google/cloud/monitoring_v3/proto/common.proto index f1b38c23..26242225 100644 --- a/google/cloud/monitoring_v3/proto/common.proto +++ b/google/cloud/monitoring_v3/proto/common.proto @@ -51,7 +51,11 @@ message TypedValue { } } -// A closed time interval. It extends from the start time to the end time, and includes both: `[startTime, endTime]`. Valid time intervals depend on the [`MetricKind`](/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind) of the metric value. In no case can the end time be earlier than the start time. +// A closed time interval. It extends from the start time to the end time, and +// includes both: `[startTime, endTime]`. Valid time intervals depend on the +// [`MetricKind`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind) +// of the metric value. In no case can the end time be earlier than the start +// time. // // * For a `GAUGE` metric, the `startTime` value is technically optional; if // no value is specified, the start time defaults to the value of the diff --git a/google/cloud/monitoring_v3/proto/common_pb2.py b/google/cloud/monitoring_v3/proto/common_pb2.py deleted file mode 100644 index f2421731..00000000 --- a/google/cloud/monitoring_v3/proto/common_pb2.py +++ /dev/null @@ -1,817 +0,0 @@ -# -*- coding: utf-8 -*- -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/monitoring_v3/proto/common.proto - -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import distribution_pb2 as google_dot_api_dot_distribution__pb2 -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/monitoring_v3/proto/common.proto", - package="google.monitoring.v3", - syntax="proto3", - serialized_options=b"\n\030com.google.monitoring.v3B\013CommonProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3\352\002\035Google::Cloud::Monitoring::V3", - serialized_pb=b'\n-google/cloud/monitoring_v3/proto/common.proto\x12\x14google.monitoring.v3\x1a\x1dgoogle/api/distribution.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xaa\x01\n\nTypedValue\x12\x14\n\nbool_value\x18\x01 \x01(\x08H\x00\x12\x15\n\x0bint64_value\x18\x02 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x03 \x01(\x01H\x00\x12\x16\n\x0cstring_value\x18\x04 \x01(\tH\x00\x12\x36\n\x12\x64istribution_value\x18\x05 \x01(\x0b\x32\x18.google.api.DistributionH\x00\x42\x07\n\x05value"l\n\x0cTimeInterval\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xad\x07\n\x0b\x41ggregation\x12\x33\n\x10\x61lignment_period\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x45\n\x12per_series_aligner\x18\x02 
\x01(\x0e\x32).google.monitoring.v3.Aggregation.Aligner\x12G\n\x14\x63ross_series_reducer\x18\x04 \x01(\x0e\x32).google.monitoring.v3.Aggregation.Reducer\x12\x17\n\x0fgroup_by_fields\x18\x05 \x03(\t"\x8b\x03\n\x07\x41ligner\x12\x0e\n\nALIGN_NONE\x10\x00\x12\x0f\n\x0b\x41LIGN_DELTA\x10\x01\x12\x0e\n\nALIGN_RATE\x10\x02\x12\x15\n\x11\x41LIGN_INTERPOLATE\x10\x03\x12\x14\n\x10\x41LIGN_NEXT_OLDER\x10\x04\x12\r\n\tALIGN_MIN\x10\n\x12\r\n\tALIGN_MAX\x10\x0b\x12\x0e\n\nALIGN_MEAN\x10\x0c\x12\x0f\n\x0b\x41LIGN_COUNT\x10\r\x12\r\n\tALIGN_SUM\x10\x0e\x12\x10\n\x0c\x41LIGN_STDDEV\x10\x0f\x12\x14\n\x10\x41LIGN_COUNT_TRUE\x10\x10\x12\x15\n\x11\x41LIGN_COUNT_FALSE\x10\x18\x12\x17\n\x13\x41LIGN_FRACTION_TRUE\x10\x11\x12\x17\n\x13\x41LIGN_PERCENTILE_99\x10\x12\x12\x17\n\x13\x41LIGN_PERCENTILE_95\x10\x13\x12\x17\n\x13\x41LIGN_PERCENTILE_50\x10\x14\x12\x17\n\x13\x41LIGN_PERCENTILE_05\x10\x15\x12\x18\n\x14\x41LIGN_PERCENT_CHANGE\x10\x17"\xb1\x02\n\x07Reducer\x12\x0f\n\x0bREDUCE_NONE\x10\x00\x12\x0f\n\x0bREDUCE_MEAN\x10\x01\x12\x0e\n\nREDUCE_MIN\x10\x02\x12\x0e\n\nREDUCE_MAX\x10\x03\x12\x0e\n\nREDUCE_SUM\x10\x04\x12\x11\n\rREDUCE_STDDEV\x10\x05\x12\x10\n\x0cREDUCE_COUNT\x10\x06\x12\x15\n\x11REDUCE_COUNT_TRUE\x10\x07\x12\x16\n\x12REDUCE_COUNT_FALSE\x10\x0f\x12\x18\n\x14REDUCE_FRACTION_TRUE\x10\x08\x12\x18\n\x14REDUCE_PERCENTILE_99\x10\t\x12\x18\n\x14REDUCE_PERCENTILE_95\x10\n\x12\x18\n\x14REDUCE_PERCENTILE_50\x10\x0b\x12\x18\n\x14REDUCE_PERCENTILE_05\x10\x0c*\x9e\x01\n\x0e\x43omparisonType\x12\x1a\n\x16\x43OMPARISON_UNSPECIFIED\x10\x00\x12\x11\n\rCOMPARISON_GT\x10\x01\x12\x11\n\rCOMPARISON_GE\x10\x02\x12\x11\n\rCOMPARISON_LT\x10\x03\x12\x11\n\rCOMPARISON_LE\x10\x04\x12\x11\n\rCOMPARISON_EQ\x10\x05\x12\x11\n\rCOMPARISON_NE\x10\x06*a\n\x0bServiceTier\x12\x1c\n\x18SERVICE_TIER_UNSPECIFIED\x10\x00\x12\x16\n\x12SERVICE_TIER_BASIC\x10\x01\x12\x18\n\x14SERVICE_TIER_PREMIUM\x10\x02\x1a\x02\x18\x01\x42\xc3\x01\n\x18\x63om.google.monitoring.v3B\x0b\x43ommonProtoP\x01Z>google.golang.org/genproto/go
ogleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3\xea\x02\x1dGoogle::Cloud::Monitoring::V3b\x06proto3', - dependencies=[ - google_dot_api_dot_distribution__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - -_COMPARISONTYPE = _descriptor.EnumDescriptor( - name="ComparisonType", - full_name="google.monitoring.v3.ComparisonType", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="COMPARISON_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="COMPARISON_GT", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="COMPARISON_GE", index=2, number=2, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="COMPARISON_LT", index=3, number=3, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="COMPARISON_LE", index=4, number=4, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="COMPARISON_EQ", index=5, number=5, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="COMPARISON_NE", index=6, number=6, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1395, - serialized_end=1553, -) -_sym_db.RegisterEnumDescriptor(_COMPARISONTYPE) - -ComparisonType = enum_type_wrapper.EnumTypeWrapper(_COMPARISONTYPE) -_SERVICETIER = _descriptor.EnumDescriptor( - name="ServiceTier", - full_name="google.monitoring.v3.ServiceTier", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="SERVICE_TIER_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="SERVICE_TIER_BASIC", - index=1, - number=1, - 
serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="SERVICE_TIER_PREMIUM", - index=2, - number=2, - serialized_options=None, - type=None, - ), - ], - containing_type=None, - serialized_options=b"\030\001", - serialized_start=1555, - serialized_end=1652, -) -_sym_db.RegisterEnumDescriptor(_SERVICETIER) - -ServiceTier = enum_type_wrapper.EnumTypeWrapper(_SERVICETIER) -COMPARISON_UNSPECIFIED = 0 -COMPARISON_GT = 1 -COMPARISON_GE = 2 -COMPARISON_LT = 3 -COMPARISON_LE = 4 -COMPARISON_EQ = 5 -COMPARISON_NE = 6 -SERVICE_TIER_UNSPECIFIED = 0 -SERVICE_TIER_BASIC = 1 -SERVICE_TIER_PREMIUM = 2 - - -_AGGREGATION_ALIGNER = _descriptor.EnumDescriptor( - name="Aligner", - full_name="google.monitoring.v3.Aggregation.Aligner", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="ALIGN_NONE", index=0, number=0, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_DELTA", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_RATE", index=2, number=2, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_INTERPOLATE", - index=3, - number=3, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_NEXT_OLDER", - index=4, - number=4, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_MIN", index=5, number=10, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_MAX", index=6, number=11, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_MEAN", index=7, number=12, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_COUNT", index=8, number=13, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_SUM", index=9, number=14, serialized_options=None, type=None - ), - 
_descriptor.EnumValueDescriptor( - name="ALIGN_STDDEV", index=10, number=15, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_COUNT_TRUE", - index=11, - number=16, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_COUNT_FALSE", - index=12, - number=24, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_FRACTION_TRUE", - index=13, - number=17, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_PERCENTILE_99", - index=14, - number=18, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_PERCENTILE_95", - index=15, - number=19, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_PERCENTILE_50", - index=16, - number=20, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_PERCENTILE_05", - index=17, - number=21, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="ALIGN_PERCENT_CHANGE", - index=18, - number=23, - serialized_options=None, - type=None, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=689, - serialized_end=1084, -) -_sym_db.RegisterEnumDescriptor(_AGGREGATION_ALIGNER) - -_AGGREGATION_REDUCER = _descriptor.EnumDescriptor( - name="Reducer", - full_name="google.monitoring.v3.Aggregation.Reducer", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="REDUCE_NONE", index=0, number=0, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="REDUCE_MEAN", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="REDUCE_MIN", index=2, number=2, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="REDUCE_MAX", index=3, number=3, serialized_options=None, type=None - ), - 
_descriptor.EnumValueDescriptor( - name="REDUCE_SUM", index=4, number=4, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="REDUCE_STDDEV", index=5, number=5, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="REDUCE_COUNT", index=6, number=6, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="REDUCE_COUNT_TRUE", - index=7, - number=7, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="REDUCE_COUNT_FALSE", - index=8, - number=15, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="REDUCE_FRACTION_TRUE", - index=9, - number=8, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="REDUCE_PERCENTILE_99", - index=10, - number=9, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="REDUCE_PERCENTILE_95", - index=11, - number=10, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="REDUCE_PERCENTILE_50", - index=12, - number=11, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="REDUCE_PERCENTILE_05", - index=13, - number=12, - serialized_options=None, - type=None, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1087, - serialized_end=1392, -) -_sym_db.RegisterEnumDescriptor(_AGGREGATION_REDUCER) - - -_TYPEDVALUE = _descriptor.Descriptor( - name="TypedValue", - full_name="google.monitoring.v3.TypedValue", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="bool_value", - full_name="google.monitoring.v3.TypedValue.bool_value", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - 
file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="int64_value", - full_name="google.monitoring.v3.TypedValue.int64_value", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="double_value", - full_name="google.monitoring.v3.TypedValue.double_value", - index=2, - number=3, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="string_value", - full_name="google.monitoring.v3.TypedValue.string_value", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="distribution_value", - full_name="google.monitoring.v3.TypedValue.distribution_value", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="value", - full_name="google.monitoring.v3.TypedValue.value", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=168, - serialized_end=338, -) - - -_TIMEINTERVAL = _descriptor.Descriptor( - 
name="TimeInterval", - full_name="google.monitoring.v3.TimeInterval", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.monitoring.v3.TimeInterval.end_time", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.monitoring.v3.TimeInterval.start_time", - index=1, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=340, - serialized_end=448, -) - - -_AGGREGATION = _descriptor.Descriptor( - name="Aggregation", - full_name="google.monitoring.v3.Aggregation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="alignment_period", - full_name="google.monitoring.v3.Aggregation.alignment_period", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="per_series_aligner", - full_name="google.monitoring.v3.Aggregation.per_series_aligner", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="cross_series_reducer", - full_name="google.monitoring.v3.Aggregation.cross_series_reducer", - index=2, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="group_by_fields", - full_name="google.monitoring.v3.Aggregation.group_by_fields", - index=3, - number=5, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_AGGREGATION_ALIGNER, _AGGREGATION_REDUCER], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=451, - serialized_end=1392, -) - -_TYPEDVALUE.fields_by_name[ - "distribution_value" -].message_type = google_dot_api_dot_distribution__pb2._DISTRIBUTION -_TYPEDVALUE.oneofs_by_name["value"].fields.append( - _TYPEDVALUE.fields_by_name["bool_value"] -) -_TYPEDVALUE.fields_by_name["bool_value"].containing_oneof = _TYPEDVALUE.oneofs_by_name[ - "value" -] -_TYPEDVALUE.oneofs_by_name["value"].fields.append( - _TYPEDVALUE.fields_by_name["int64_value"] -) -_TYPEDVALUE.fields_by_name["int64_value"].containing_oneof = _TYPEDVALUE.oneofs_by_name[ - "value" -] -_TYPEDVALUE.oneofs_by_name["value"].fields.append( - _TYPEDVALUE.fields_by_name["double_value"] -) -_TYPEDVALUE.fields_by_name[ - "double_value" -].containing_oneof = _TYPEDVALUE.oneofs_by_name["value"] -_TYPEDVALUE.oneofs_by_name["value"].fields.append( - _TYPEDVALUE.fields_by_name["string_value"] -) -_TYPEDVALUE.fields_by_name[ - 
"string_value" -].containing_oneof = _TYPEDVALUE.oneofs_by_name["value"] -_TYPEDVALUE.oneofs_by_name["value"].fields.append( - _TYPEDVALUE.fields_by_name["distribution_value"] -) -_TYPEDVALUE.fields_by_name[ - "distribution_value" -].containing_oneof = _TYPEDVALUE.oneofs_by_name["value"] -_TIMEINTERVAL.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_TIMEINTERVAL.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_AGGREGATION.fields_by_name[ - "alignment_period" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_AGGREGATION.fields_by_name["per_series_aligner"].enum_type = _AGGREGATION_ALIGNER -_AGGREGATION.fields_by_name["cross_series_reducer"].enum_type = _AGGREGATION_REDUCER -_AGGREGATION_ALIGNER.containing_type = _AGGREGATION -_AGGREGATION_REDUCER.containing_type = _AGGREGATION -DESCRIPTOR.message_types_by_name["TypedValue"] = _TYPEDVALUE -DESCRIPTOR.message_types_by_name["TimeInterval"] = _TIMEINTERVAL -DESCRIPTOR.message_types_by_name["Aggregation"] = _AGGREGATION -DESCRIPTOR.enum_types_by_name["ComparisonType"] = _COMPARISONTYPE -DESCRIPTOR.enum_types_by_name["ServiceTier"] = _SERVICETIER -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -TypedValue = _reflection.GeneratedProtocolMessageType( - "TypedValue", - (_message.Message,), - { - "DESCRIPTOR": _TYPEDVALUE, - "__module__": "google.cloud.monitoring_v3.proto.common_pb2", - "__doc__": """A single strongly-typed value. - - - Attributes: - value: - The typed value field. - bool_value: - A Boolean value: ``true`` or ``false``. - int64_value: - A 64-bit integer. Its range is approximately ±9.2x1018. - double_value: - A 64-bit double-precision floating-point number. Its magnitude - is approximately ±10±300 and it has 16 significant digits of - precision. - string_value: - A variable-length string value. - distribution_value: - A distribution value. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.TypedValue) - }, -) -_sym_db.RegisterMessage(TypedValue) - -TimeInterval = _reflection.GeneratedProtocolMessageType( - "TimeInterval", - (_message.Message,), - { - "DESCRIPTOR": _TIMEINTERVAL, - "__module__": "google.cloud.monitoring_v3.proto.common_pb2", - "__doc__": """A closed time interval. It extends from the start time to the end time, - and includes both: ``[startTime, endTime]``. Valid time intervals depend - on the - ```MetricKind`` `__ - of the metric value. In no case can the end time be earlier than the - start time. - - - For a ``GAUGE`` metric, the ``startTime`` value is technically - optional; if no value is specified, the start time defaults to the - value of the end time, and the interval represents a single point in - time. If both start and end times are specified, they must be - identical. Such an interval is valid only for ``GAUGE`` metrics, - which are point-in-time measurements. - - - For ``DELTA`` and ``CUMULATIVE`` metrics, the start time must be - earlier than the end time. - - - In all cases, the start time of the next interval must be at least a - millisecond after the end time of the previous interval. Because the - interval is closed, if the start time of a new interval is the same - as the end time of the previous interval, data written at the new - start time could overwrite data written at the previous end time. - - - Attributes: - end_time: - Required. The end of the time interval. - start_time: - Optional. The beginning of the time interval. The default - value for the start time is the end time. The start time must - not be later than the end time. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.TimeInterval) - }, -) -_sym_db.RegisterMessage(TimeInterval) - -Aggregation = _reflection.GeneratedProtocolMessageType( - "Aggregation", - (_message.Message,), - { - "DESCRIPTOR": _AGGREGATION, - "__module__": "google.cloud.monitoring_v3.proto.common_pb2", - "__doc__": """Describes how to combine multiple time series to provide a - different view of the data. Aggregation of time series is done in two - steps. First, each time series in the set is *aligned* to the same time - interval boundaries, then the set of time series is optionally *reduced* - in number. - - Alignment consists of applying the ``per_series_aligner`` operation to - each time series after its data has been divided into regular - ``alignment_period`` time intervals. This process takes *all* of the - data points in an alignment period, applies a mathematical - transformation such as averaging, minimum, maximum, delta, etc., and - converts them into a single data point per period. - - Reduction is when the aligned and transformed time series can optionally - be combined, reducing the number of time series through similar - mathematical transformations. Reduction involves applying a - ``cross_series_reducer`` to all the time series, optionally sorting the - time series into subsets with ``group_by_fields``, and applying the - reducer to each subset. - - The raw time series data can contain a huge amount of information from - multiple sources. Alignment and reduction transforms this mass of data - into a more manageable and representative collection of data, for - example “the 95% latency across the average of all tasks in a cluster”. - This representative data can be more easily graphed and comprehended, - and the individual time series data is still available for later - drilldown. For more details, see `Filtering and - aggregation `__. 
- - - Attributes: - alignment_period: - The ``alignment_period`` specifies a time interval, in - seconds, that is used to divide the data in all the [time - series][google.monitoring.v3.TimeSeries] into consistent - blocks of time. This will be done before the per-series - aligner can be applied to the data. The value must be at - least 60 seconds. If a per-series aligner other than - ``ALIGN_NONE`` is specified, this field is required or an - error is returned. If no per-series aligner is specified, or - the aligner ``ALIGN_NONE`` is specified, then this field is - ignored. - per_series_aligner: - An ``Aligner`` describes how to bring the data points in a - single time series into temporal alignment. Except for - ``ALIGN_NONE``, all alignments cause all the data points in an - ``alignment_period`` to be mathematically grouped together, - resulting in a single data point for each ``alignment_period`` - with end timestamp at the end of the period. Not all - alignment operations may be applied to all time series. The - valid choices depend on the ``metric_kind`` and ``value_type`` - of the original time series. Alignment can change the - ``metric_kind`` or the ``value_type`` of the time series. - Time series data must be aligned in order to perform cross- - time series reduction. If ``cross_series_reducer`` is - specified, then ``per_series_aligner`` must be specified and - not equal to ``ALIGN_NONE`` and ``alignment_period`` must be - specified; otherwise, an error is returned. - cross_series_reducer: - The reduction operation to be used to combine time series into - a single time series, where the value of each data point in - the resulting series is a function of all the already aligned - values in the input time series. Not all reducer operations - can be applied to all time series. The valid choices depend on - the ``metric_kind`` and the ``value_type`` of the original - time series. 
Reduction can yield a time series with a - different ``metric_kind`` or ``value_type`` than the input - time series. Time series data must first be aligned (see - ``per_series_aligner``) in order to perform cross-time series - reduction. If ``cross_series_reducer`` is specified, then - ``per_series_aligner`` must be specified, and must not be - ``ALIGN_NONE``. An ``alignment_period`` must also be - specified; otherwise, an error is returned. - group_by_fields: - The set of fields to preserve when ``cross_series_reducer`` is - specified. The ``group_by_fields`` determine how the time - series are partitioned into subsets prior to applying the - aggregation operation. Each subset contains time series that - have the same value for each of the grouping fields. Each - individual time series is a member of exactly one subset. The - ``cross_series_reducer`` is applied to each subset of time - series. It is not possible to reduce across different resource - types, so this field implicitly contains ``resource.type``. - Fields not specified in ``group_by_fields`` are aggregated - away. If ``group_by_fields`` is not specified and all the time - series have the same resource type, then the time series are - aggregated into a single output time series. If - ``cross_series_reducer`` is not defined, this field is - ignored. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.Aggregation) - }, -) -_sym_db.RegisterMessage(Aggregation) - - -DESCRIPTOR._options = None -_SERVICETIER._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/monitoring_v3/proto/common_pb2_grpc.py b/google/cloud/monitoring_v3/proto/common_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/monitoring_v3/proto/common_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-import grpc diff --git a/google/cloud/monitoring_v3/proto/dropped_labels_pb2.py b/google/cloud/monitoring_v3/proto/dropped_labels_pb2.py deleted file mode 100644 index 5e27eaf3..00000000 --- a/google/cloud/monitoring_v3/proto/dropped_labels_pb2.py +++ /dev/null @@ -1,169 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/monitoring_v3/proto/dropped_labels.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/monitoring_v3/proto/dropped_labels.proto", - package="google.monitoring.v3", - syntax="proto3", - serialized_options=b"\n\030com.google.monitoring.v3B\022DroppedLabelsProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3\352\002\035Google::Cloud::Monitoring::V3", - serialized_pb=b'\n5google/cloud/monitoring_v3/proto/dropped_labels.proto\x12\x14google.monitoring.v3"|\n\rDroppedLabels\x12=\n\x05label\x18\x01 \x03(\x0b\x32..google.monitoring.v3.DroppedLabels.LabelEntry\x1a,\n\nLabelEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\xca\x01\n\x18\x63om.google.monitoring.v3B\x12\x44roppedLabelsProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3\xea\x02\x1dGoogle::Cloud::Monitoring::V3b\x06proto3', -) - - -_DROPPEDLABELS_LABELENTRY = _descriptor.Descriptor( - name="LabelEntry", - full_name="google.monitoring.v3.DroppedLabels.LabelEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - 
name="key", - full_name="google.monitoring.v3.DroppedLabels.LabelEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.monitoring.v3.DroppedLabels.LabelEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=159, - serialized_end=203, -) - -_DROPPEDLABELS = _descriptor.Descriptor( - name="DroppedLabels", - full_name="google.monitoring.v3.DroppedLabels", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="label", - full_name="google.monitoring.v3.DroppedLabels.label", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[_DROPPEDLABELS_LABELENTRY], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=79, - serialized_end=203, -) - -_DROPPEDLABELS_LABELENTRY.containing_type = _DROPPEDLABELS -_DROPPEDLABELS.fields_by_name["label"].message_type = _DROPPEDLABELS_LABELENTRY -DESCRIPTOR.message_types_by_name["DroppedLabels"] = _DROPPEDLABELS 
-_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -DroppedLabels = _reflection.GeneratedProtocolMessageType( - "DroppedLabels", - (_message.Message,), - { - "LabelEntry": _reflection.GeneratedProtocolMessageType( - "LabelEntry", - (_message.Message,), - { - "DESCRIPTOR": _DROPPEDLABELS_LABELENTRY, - "__module__": "google.cloud.monitoring_v3.proto.dropped_labels_pb2" - # @@protoc_insertion_point(class_scope:google.monitoring.v3.DroppedLabels.LabelEntry) - }, - ), - "DESCRIPTOR": _DROPPEDLABELS, - "__module__": "google.cloud.monitoring_v3.proto.dropped_labels_pb2", - "__doc__": """A set of (label, value) pairs which were dropped during - aggregation, attached to google.api.Distribution.Exemplars in - google.api.Distribution values during aggregation. - - These values are used in combination with the label values that remain - on the aggregated Distribution timeseries to construct the full label - set for the exemplar values. The resulting full label set may be used to - identify the specific task/job/instance (for example) which may be - contributing to a long-tail, while allowing the storage savings of only - storing aggregated distribution values for a large group. - - Note that there are no guarantees on ordering of the labels from - exemplar-to-exemplar and from distribution-to-distribution in the same - stream, and there may be duplicates. It is up to clients to resolve any - ambiguities. - - - Attributes: - label: - Map from label to its value, for all labels dropped in any - aggregation. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.DroppedLabels) - }, -) -_sym_db.RegisterMessage(DroppedLabels) -_sym_db.RegisterMessage(DroppedLabels.LabelEntry) - - -DESCRIPTOR._options = None -_DROPPEDLABELS_LABELENTRY._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/monitoring_v3/proto/dropped_labels_pb2_grpc.py b/google/cloud/monitoring_v3/proto/dropped_labels_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/monitoring_v3/proto/dropped_labels_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/monitoring_v3/proto/group_pb2.py b/google/cloud/monitoring_v3/proto/group_pb2.py deleted file mode 100644 index 40e28984..00000000 --- a/google/cloud/monitoring_v3/proto/group_pb2.py +++ /dev/null @@ -1,207 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/monitoring_v3/proto/group.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/monitoring_v3/proto/group.proto", - package="google.monitoring.v3", - syntax="proto3", - serialized_options=b"\n\030com.google.monitoring.v3B\nGroupProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3\352\002\035Google::Cloud::Monitoring::V3", - 
serialized_pb=b'\n,google/cloud/monitoring_v3/proto/group.proto\x12\x14google.monitoring.v3\x1a\x19google/api/resource.proto"\x80\x02\n\x05Group\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0bparent_name\x18\x03 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x05 \x01(\t\x12\x12\n\nis_cluster\x18\x06 \x01(\x08:\x99\x01\xea\x41\x95\x01\n\x1fmonitoring.googleapis.com/Group\x12!projects/{project}/groups/{group}\x12+organizations/{organization}/groups/{group}\x12\x1f\x66olders/{folder}/groups/{group}\x12\x01*B\xc2\x01\n\x18\x63om.google.monitoring.v3B\nGroupProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3\xea\x02\x1dGoogle::Cloud::Monitoring::V3b\x06proto3', - dependencies=[google_dot_api_dot_resource__pb2.DESCRIPTOR], -) - - -_GROUP = _descriptor.Descriptor( - name="Group", - full_name="google.monitoring.v3.Group", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.Group.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.monitoring.v3.Group.display_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="parent_name", - full_name="google.monitoring.v3.Group.parent_name", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.monitoring.v3.Group.filter", - index=3, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_cluster", - full_name="google.monitoring.v3.Group.is_cluster", - index=4, - number=6, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"\352A\225\001\n\037monitoring.googleapis.com/Group\022!projects/{project}/groups/{group}\022+organizations/{organization}/groups/{group}\022\037folders/{folder}/groups/{group}\022\001*", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=98, - serialized_end=354, -) - -DESCRIPTOR.message_types_by_name["Group"] = _GROUP -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Group = _reflection.GeneratedProtocolMessageType( - "Group", - (_message.Message,), - { - "DESCRIPTOR": _GROUP, - "__module__": "google.cloud.monitoring_v3.proto.group_pb2", - "__doc__": """The description of a dynamic collection of monitored - resources. Each group has a filter that is matched against monitored - resources and their associated metadata. If a group’s filter matches an - available monitored resource, then that resource is a member of that - group. 
Groups can contain any number of monitored resources, and each - monitored resource can be a member of any number of groups. - - Groups can be nested in parent-child hierarchies. The ``parentName`` - field identifies an optional parent for each group. If a group has a - parent, then the only monitored resources available to be matched by the - group’s filter are the resources contained in the parent group. In other - words, a group contains the monitored resources that match its filter - and the filters of all the group’s ancestors. A group without a parent - can contain any monitored resource. - - For example, consider an infrastructure running a set of instances with - two user-defined tags: ``"environment"`` and ``"role"``. A parent group - has a filter, ``environment="production"``. A child of that parent group - has a filter, ``role="transcoder"``. The parent group contains all - instances in the production environment, regardless of their roles. The - child group contains instances that have the transcoder role *and* are - in the production environment. - - The monitored resources contained in a group can change at any moment, - depending on what resources exist and what filters are associated with - the group and its ancestors. - - - Attributes: - name: - Output only. The name of this group. The format is: :: - projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] When - creating a group, this field is ignored and a new name is - created consisting of the project specified in the call to - ``CreateGroup`` and a unique ``[GROUP_ID]`` that is generated - automatically. - display_name: - A user-assigned name for this group, used only for display - purposes. - parent_name: - The name of the group’s parent, if it has one. The format is: - :: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] For - groups with no parent, ``parent_name`` is the empty string, - ``""``. - filter: - The filter used to determine which monitored resources belong - to this group. 
- is_cluster: - If true, the members of this group are considered to be a - cluster. The system can perform additional analysis on groups - that are clusters. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.Group) - }, -) -_sym_db.RegisterMessage(Group) - - -DESCRIPTOR._options = None -_GROUP._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/monitoring_v3/proto/group_pb2_grpc.py b/google/cloud/monitoring_v3/proto/group_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/monitoring_v3/proto/group_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/monitoring_v3/proto/group_service_pb2.py b/google/cloud/monitoring_v3/proto/group_service_pb2.py deleted file mode 100644 index e5795f04..00000000 --- a/google/cloud/monitoring_v3/proto/group_service_pb2.py +++ /dev/null @@ -1,1014 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/monitoring_v3/proto/group_service.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import ( - monitored_resource_pb2 as google_dot_api_dot_monitored__resource__pb2, -) -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.monitoring_v3.proto import ( - common_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2, -) -from google.cloud.monitoring_v3.proto import ( - group_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/monitoring_v3/proto/group_service.proto", - package="google.monitoring.v3", - syntax="proto3", - serialized_options=b"\n\030com.google.monitoring.v3B\021GroupServiceProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3\352\002\035Google::Cloud::Monitoring::V3", - 
serialized_pb=b'\n4google/cloud/monitoring_v3/proto/group_service.proto\x12\x14google.monitoring.v3\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/api/monitored_resource.proto\x1a\x19google/api/resource.proto\x1a-google/cloud/monitoring_v3/proto/common.proto\x1a,google/cloud/monitoring_v3/proto/group.proto\x1a\x1bgoogle/protobuf/empty.proto"\xc8\x02\n\x11ListGroupsRequest\x12\x35\n\x04name\x18\x07 \x01(\tB\'\xe0\x41\x02\xfa\x41!\x12\x1fmonitoring.googleapis.com/Group\x12\x41\n\x11\x63hildren_of_group\x18\x02 \x01(\tB$\xfa\x41!\n\x1fmonitoring.googleapis.com/GroupH\x00\x12\x42\n\x12\x61ncestors_of_group\x18\x03 \x01(\tB$\xfa\x41!\n\x1fmonitoring.googleapis.com/GroupH\x00\x12\x44\n\x14\x64\x65scendants_of_group\x18\x04 \x01(\tB$\xfa\x41!\n\x1fmonitoring.googleapis.com/GroupH\x00\x12\x11\n\tpage_size\x18\x05 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\tB\x08\n\x06\x66ilter"Y\n\x12ListGroupsResponse\x12*\n\x05group\x18\x01 \x03(\x0b\x32\x1b.google.monitoring.v3.Group\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"H\n\x0fGetGroupRequest\x12\x35\n\x04name\x18\x03 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1fmonitoring.googleapis.com/Group"\x93\x01\n\x12\x43reateGroupRequest\x12\x35\n\x04name\x18\x04 \x01(\tB\'\xe0\x41\x02\xfa\x41!\x12\x1fmonitoring.googleapis.com/Group\x12/\n\x05group\x18\x02 \x01(\x0b\x32\x1b.google.monitoring.v3.GroupB\x03\xe0\x41\x02\x12\x15\n\rvalidate_only\x18\x03 \x01(\x08"\\\n\x12UpdateGroupRequest\x12/\n\x05group\x18\x02 \x01(\x0b\x32\x1b.google.monitoring.v3.GroupB\x03\xe0\x41\x02\x12\x15\n\rvalidate_only\x18\x03 \x01(\x08"^\n\x12\x44\x65leteGroupRequest\x12\x35\n\x04name\x18\x03 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1fmonitoring.googleapis.com/Group\x12\x11\n\trecursive\x18\x04 \x01(\x08"\xbd\x01\n\x17ListGroupMembersRequest\x12\x35\n\x04name\x18\x07 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1fmonitoring.googleapis.com/Group\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 
\x01(\t\x12\x0e\n\x06\x66ilter\x18\x05 \x01(\t\x12\x34\n\x08interval\x18\x06 \x01(\x0b\x32".google.monitoring.v3.TimeInterval"w\n\x18ListGroupMembersResponse\x12.\n\x07members\x18\x01 \x03(\x0b\x32\x1d.google.api.MonitoredResource\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x12\n\ntotal_size\x18\x03 \x01(\x05\x32\x98\x08\n\x0cGroupService\x12\x8c\x01\n\nListGroups\x12\'.google.monitoring.v3.ListGroupsRequest\x1a(.google.monitoring.v3.ListGroupsResponse"+\x82\xd3\xe4\x93\x02\x1e\x12\x1c/v3/{name=projects/*}/groups\xda\x41\x04name\x12}\n\x08GetGroup\x12%.google.monitoring.v3.GetGroupRequest\x1a\x1b.google.monitoring.v3.Group"-\x82\xd3\xe4\x93\x02 \x12\x1e/v3/{name=projects/*/groups/*}\xda\x41\x04name\x12\x8e\x01\n\x0b\x43reateGroup\x12(.google.monitoring.v3.CreateGroupRequest\x1a\x1b.google.monitoring.v3.Group"8\x82\xd3\xe4\x93\x02%"\x1c/v3/{name=projects/*}/groups:\x05group\xda\x41\nname,group\x12\x91\x01\n\x0bUpdateGroup\x12(.google.monitoring.v3.UpdateGroupRequest\x1a\x1b.google.monitoring.v3.Group";\x82\xd3\xe4\x93\x02-\x1a$/v3/{group.name=projects/*/groups/*}:\x05group\xda\x41\x05group\x12~\n\x0b\x44\x65leteGroup\x12(.google.monitoring.v3.DeleteGroupRequest\x1a\x16.google.protobuf.Empty"-\x82\xd3\xe4\x93\x02 *\x1e/v3/{name=projects/*/groups/*}\xda\x41\x04name\x12\xa8\x01\n\x10ListGroupMembers\x12-.google.monitoring.v3.ListGroupMembersRequest\x1a..google.monitoring.v3.ListGroupMembersResponse"5\x82\xd3\xe4\x93\x02(\x12&/v3/{name=projects/*/groups/*}/members\xda\x41\x04name\x1a\xa9\x01\xca\x41\x19monitoring.googleapis.com\xd2\x41\x89\x01https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/monitoring,https://www.googleapis.com/auth/monitoring.readB\xc9\x01\n\x18\x63om.google.monitoring.v3B\x11GroupServiceProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3\xea\x02\x1dGoogle::Cloud::Monitoring::V3b\x06proto3', - dependencies=[ - 
google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_monitored__resource__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2.DESCRIPTOR, - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - ], -) - - -_LISTGROUPSREQUEST = _descriptor.Descriptor( - name="ListGroupsRequest", - full_name="google.monitoring.v3.ListGroupsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.ListGroupsRequest.name", - index=0, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\022\037monitoring.googleapis.com/Group", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="children_of_group", - full_name="google.monitoring.v3.ListGroupsRequest.children_of_group", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\372A!\n\037monitoring.googleapis.com/Group", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="ancestors_of_group", - full_name="google.monitoring.v3.ListGroupsRequest.ancestors_of_group", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\372A!\n\037monitoring.googleapis.com/Group", - 
file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="descendants_of_group", - full_name="google.monitoring.v3.ListGroupsRequest.descendants_of_group", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\372A!\n\037monitoring.googleapis.com/Group", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.monitoring.v3.ListGroupsRequest.page_size", - index=4, - number=5, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.monitoring.v3.ListGroupsRequest.page_token", - index=5, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="filter", - full_name="google.monitoring.v3.ListGroupsRequest.filter", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=353, - serialized_end=681, -) - - -_LISTGROUPSRESPONSE = _descriptor.Descriptor( - name="ListGroupsResponse", - full_name="google.monitoring.v3.ListGroupsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="group", - full_name="google.monitoring.v3.ListGroupsResponse.group", - index=0, - number=1, - type=11, - cpp_type=10, 
- label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.monitoring.v3.ListGroupsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=683, - serialized_end=772, -) - - -_GETGROUPREQUEST = _descriptor.Descriptor( - name="GetGroupRequest", - full_name="google.monitoring.v3.GetGroupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.GetGroupRequest.name", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037monitoring.googleapis.com/Group", - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=774, - serialized_end=846, -) - - -_CREATEGROUPREQUEST = _descriptor.Descriptor( - name="CreateGroupRequest", - full_name="google.monitoring.v3.CreateGroupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.CreateGroupRequest.name", - 
index=0, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\022\037monitoring.googleapis.com/Group", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="group", - full_name="google.monitoring.v3.CreateGroupRequest.group", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="validate_only", - full_name="google.monitoring.v3.CreateGroupRequest.validate_only", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=849, - serialized_end=996, -) - - -_UPDATEGROUPREQUEST = _descriptor.Descriptor( - name="UpdateGroupRequest", - full_name="google.monitoring.v3.UpdateGroupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="group", - full_name="google.monitoring.v3.UpdateGroupRequest.group", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="validate_only", - 
full_name="google.monitoring.v3.UpdateGroupRequest.validate_only", - index=1, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=998, - serialized_end=1090, -) - - -_DELETEGROUPREQUEST = _descriptor.Descriptor( - name="DeleteGroupRequest", - full_name="google.monitoring.v3.DeleteGroupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.DeleteGroupRequest.name", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037monitoring.googleapis.com/Group", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="recursive", - full_name="google.monitoring.v3.DeleteGroupRequest.recursive", - index=1, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1092, - serialized_end=1186, -) - - -_LISTGROUPMEMBERSREQUEST = _descriptor.Descriptor( - name="ListGroupMembersRequest", - full_name="google.monitoring.v3.ListGroupMembersRequest", - filename=None, - file=DESCRIPTOR, - 
containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.ListGroupMembersRequest.name", - index=0, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037monitoring.googleapis.com/Group", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.monitoring.v3.ListGroupMembersRequest.page_size", - index=1, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.monitoring.v3.ListGroupMembersRequest.page_token", - index=2, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.monitoring.v3.ListGroupMembersRequest.filter", - index=3, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="interval", - full_name="google.monitoring.v3.ListGroupMembersRequest.interval", - index=4, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1189, - serialized_end=1378, -) - - -_LISTGROUPMEMBERSRESPONSE = _descriptor.Descriptor( - name="ListGroupMembersResponse", - full_name="google.monitoring.v3.ListGroupMembersResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="members", - full_name="google.monitoring.v3.ListGroupMembersResponse.members", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.monitoring.v3.ListGroupMembersResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="total_size", - full_name="google.monitoring.v3.ListGroupMembersResponse.total_size", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1380, - serialized_end=1499, -) - -_LISTGROUPSREQUEST.oneofs_by_name["filter"].fields.append( - 
_LISTGROUPSREQUEST.fields_by_name["children_of_group"] -) -_LISTGROUPSREQUEST.fields_by_name[ - "children_of_group" -].containing_oneof = _LISTGROUPSREQUEST.oneofs_by_name["filter"] -_LISTGROUPSREQUEST.oneofs_by_name["filter"].fields.append( - _LISTGROUPSREQUEST.fields_by_name["ancestors_of_group"] -) -_LISTGROUPSREQUEST.fields_by_name[ - "ancestors_of_group" -].containing_oneof = _LISTGROUPSREQUEST.oneofs_by_name["filter"] -_LISTGROUPSREQUEST.oneofs_by_name["filter"].fields.append( - _LISTGROUPSREQUEST.fields_by_name["descendants_of_group"] -) -_LISTGROUPSREQUEST.fields_by_name[ - "descendants_of_group" -].containing_oneof = _LISTGROUPSREQUEST.oneofs_by_name["filter"] -_LISTGROUPSRESPONSE.fields_by_name[ - "group" -].message_type = google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2._GROUP -_CREATEGROUPREQUEST.fields_by_name[ - "group" -].message_type = google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2._GROUP -_UPDATEGROUPREQUEST.fields_by_name[ - "group" -].message_type = google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2._GROUP -_LISTGROUPMEMBERSREQUEST.fields_by_name[ - "interval" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2._TIMEINTERVAL -) -_LISTGROUPMEMBERSRESPONSE.fields_by_name[ - "members" -].message_type = google_dot_api_dot_monitored__resource__pb2._MONITOREDRESOURCE -DESCRIPTOR.message_types_by_name["ListGroupsRequest"] = _LISTGROUPSREQUEST -DESCRIPTOR.message_types_by_name["ListGroupsResponse"] = _LISTGROUPSRESPONSE -DESCRIPTOR.message_types_by_name["GetGroupRequest"] = _GETGROUPREQUEST -DESCRIPTOR.message_types_by_name["CreateGroupRequest"] = _CREATEGROUPREQUEST -DESCRIPTOR.message_types_by_name["UpdateGroupRequest"] = _UPDATEGROUPREQUEST -DESCRIPTOR.message_types_by_name["DeleteGroupRequest"] = _DELETEGROUPREQUEST -DESCRIPTOR.message_types_by_name["ListGroupMembersRequest"] = _LISTGROUPMEMBERSREQUEST -DESCRIPTOR.message_types_by_name["ListGroupMembersResponse"] = 
_LISTGROUPMEMBERSRESPONSE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ListGroupsRequest = _reflection.GeneratedProtocolMessageType( - "ListGroupsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTGROUPSREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.group_service_pb2", - "__doc__": """The ``ListGroup`` request. - - - Attributes: - name: - Required. The project whose groups are to be listed. The - format is: :: projects/[PROJECT_ID_OR_NUMBER] - filter: - An optional filter consisting of a single group name. The - filters limit the groups returned based on their parent-child - relationship with the specified group. If no filter is - specified, all groups are returned. - children_of_group: - A group name. The format is: :: - projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Returns - groups whose ``parent_name`` field contains the group name. If - no groups have this parent, the results are empty. - ancestors_of_group: - A group name. The format is: :: - projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Returns - groups that are ancestors of the specified group. The groups - are returned in order, starting with the immediate parent and - ending with the most distant ancestor. If the specified group - has no immediate parent, the results are empty. - descendants_of_group: - A group name. The format is: :: - projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Returns the - descendants of the specified group. This is a superset of the - results returned by the ``children_of_group`` filter, and - includes children-of-children, and so forth. - page_size: - A positive number that is the maximum number of results to - return. - page_token: - If this field is not empty then it must contain the - ``next_page_token`` value returned by a previous call to this - method. Using this field causes the method to return - additional results from the previous method call. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListGroupsRequest) - }, -) -_sym_db.RegisterMessage(ListGroupsRequest) - -ListGroupsResponse = _reflection.GeneratedProtocolMessageType( - "ListGroupsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTGROUPSRESPONSE, - "__module__": "google.cloud.monitoring_v3.proto.group_service_pb2", - "__doc__": """The ``ListGroups`` response. - - - Attributes: - group: - The groups that match the specified filters. - next_page_token: - If there are more results than have been returned, then this - field is set to a non-empty value. To see the additional - results, use that value as ``page_token`` in the next call to - this method. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListGroupsResponse) - }, -) -_sym_db.RegisterMessage(ListGroupsResponse) - -GetGroupRequest = _reflection.GeneratedProtocolMessageType( - "GetGroupRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETGROUPREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.group_service_pb2", - "__doc__": """The ``GetGroup`` request. - - - Attributes: - name: - Required. The group to retrieve. The format is: :: - projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.GetGroupRequest) - }, -) -_sym_db.RegisterMessage(GetGroupRequest) - -CreateGroupRequest = _reflection.GeneratedProtocolMessageType( - "CreateGroupRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEGROUPREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.group_service_pb2", - "__doc__": """The ``CreateGroup`` request. - - - Attributes: - name: - Required. The project in which to create the group. The format - is: :: projects/[PROJECT_ID_OR_NUMBER] - group: - Required. A group definition. It is an error to define the - ``name`` field because the system assigns the name. - validate_only: - If true, validate this request but do not create the group. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.CreateGroupRequest) - }, -) -_sym_db.RegisterMessage(CreateGroupRequest) - -UpdateGroupRequest = _reflection.GeneratedProtocolMessageType( - "UpdateGroupRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEGROUPREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.group_service_pb2", - "__doc__": """The ``UpdateGroup`` request. - - - Attributes: - group: - Required. The new definition of the group. All fields of the - existing group, excepting ``name``, are replaced with the - corresponding fields of this group. - validate_only: - If true, validate this request but do not update the existing - group. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.UpdateGroupRequest) - }, -) -_sym_db.RegisterMessage(UpdateGroupRequest) - -DeleteGroupRequest = _reflection.GeneratedProtocolMessageType( - "DeleteGroupRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEGROUPREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.group_service_pb2", - "__doc__": """The ``DeleteGroup`` request. The default behavior is to be - able to delete a single group without any descendants. - - - Attributes: - name: - Required. The group to delete. The format is: :: - projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - recursive: - If this field is true, then the request means to delete a - group with all its descendants. Otherwise, the request means - to delete a group only when it has no descendants. The default - value is false. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.DeleteGroupRequest) - }, -) -_sym_db.RegisterMessage(DeleteGroupRequest) - -ListGroupMembersRequest = _reflection.GeneratedProtocolMessageType( - "ListGroupMembersRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTGROUPMEMBERSREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.group_service_pb2", - "__doc__": """The ``ListGroupMembers`` request. 
- - - Attributes: - name: - Required. The group whose members are listed. The format is: - :: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - page_size: - A positive number that is the maximum number of results to - return. - page_token: - If this field is not empty then it must contain the - ``next_page_token`` value returned by a previous call to this - method. Using this field causes the method to return - additional results from the previous method call. - filter: - An optional `list filter `__ describing the members to be - returned. The filter may reference the type, labels, and - metadata of monitored resources that comprise the group. For - example, to return only resources representing Compute Engine - VM instances, use this filter: :: `resource.type = - "gce_instance"` - interval: - An optional time interval for which results should be - returned. Only members that were part of the group during the - specified interval are included in the response. If no - interval is provided then the group membership over the last - minute is returned. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListGroupMembersRequest) - }, -) -_sym_db.RegisterMessage(ListGroupMembersRequest) - -ListGroupMembersResponse = _reflection.GeneratedProtocolMessageType( - "ListGroupMembersResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTGROUPMEMBERSRESPONSE, - "__module__": "google.cloud.monitoring_v3.proto.group_service_pb2", - "__doc__": """The ``ListGroupMembers`` response. - - - Attributes: - members: - A set of monitored resources in the group. - next_page_token: - If there are more results than have been returned, then this - field is set to a non-empty value. To see the additional - results, use that value as ``page_token`` in the next call to - this method. - total_size: - The total number of elements matching this request. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListGroupMembersResponse) - }, -) -_sym_db.RegisterMessage(ListGroupMembersResponse) - - -DESCRIPTOR._options = None -_LISTGROUPSREQUEST.fields_by_name["name"]._options = None -_LISTGROUPSREQUEST.fields_by_name["children_of_group"]._options = None -_LISTGROUPSREQUEST.fields_by_name["ancestors_of_group"]._options = None -_LISTGROUPSREQUEST.fields_by_name["descendants_of_group"]._options = None -_GETGROUPREQUEST.fields_by_name["name"]._options = None -_CREATEGROUPREQUEST.fields_by_name["name"]._options = None -_CREATEGROUPREQUEST.fields_by_name["group"]._options = None -_UPDATEGROUPREQUEST.fields_by_name["group"]._options = None -_DELETEGROUPREQUEST.fields_by_name["name"]._options = None -_LISTGROUPMEMBERSREQUEST.fields_by_name["name"]._options = None - -_GROUPSERVICE = _descriptor.ServiceDescriptor( - name="GroupService", - full_name="google.monitoring.v3.GroupService", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\031monitoring.googleapis.com\322A\211\001https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/monitoring,https://www.googleapis.com/auth/monitoring.read", - serialized_start=1502, - serialized_end=2550, - methods=[ - _descriptor.MethodDescriptor( - name="ListGroups", - full_name="google.monitoring.v3.GroupService.ListGroups", - index=0, - containing_service=None, - input_type=_LISTGROUPSREQUEST, - output_type=_LISTGROUPSRESPONSE, - serialized_options=b"\202\323\344\223\002\036\022\034/v3/{name=projects/*}/groups\332A\004name", - ), - _descriptor.MethodDescriptor( - name="GetGroup", - full_name="google.monitoring.v3.GroupService.GetGroup", - index=1, - containing_service=None, - input_type=_GETGROUPREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2._GROUP, - serialized_options=b"\202\323\344\223\002 \022\036/v3/{name=projects/*/groups/*}\332A\004name", - ), - _descriptor.MethodDescriptor( - name="CreateGroup", - 
full_name="google.monitoring.v3.GroupService.CreateGroup", - index=2, - containing_service=None, - input_type=_CREATEGROUPREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2._GROUP, - serialized_options=b'\202\323\344\223\002%"\034/v3/{name=projects/*}/groups:\005group\332A\nname,group', - ), - _descriptor.MethodDescriptor( - name="UpdateGroup", - full_name="google.monitoring.v3.GroupService.UpdateGroup", - index=3, - containing_service=None, - input_type=_UPDATEGROUPREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2._GROUP, - serialized_options=b"\202\323\344\223\002-\032$/v3/{group.name=projects/*/groups/*}:\005group\332A\005group", - ), - _descriptor.MethodDescriptor( - name="DeleteGroup", - full_name="google.monitoring.v3.GroupService.DeleteGroup", - index=4, - containing_service=None, - input_type=_DELETEGROUPREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002 *\036/v3/{name=projects/*/groups/*}\332A\004name", - ), - _descriptor.MethodDescriptor( - name="ListGroupMembers", - full_name="google.monitoring.v3.GroupService.ListGroupMembers", - index=5, - containing_service=None, - input_type=_LISTGROUPMEMBERSREQUEST, - output_type=_LISTGROUPMEMBERSRESPONSE, - serialized_options=b"\202\323\344\223\002(\022&/v3/{name=projects/*/groups/*}/members\332A\004name", - ), - ], -) -_sym_db.RegisterServiceDescriptor(_GROUPSERVICE) - -DESCRIPTOR.services_by_name["GroupService"] = _GROUPSERVICE - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/monitoring_v3/proto/group_service_pb2_grpc.py b/google/cloud/monitoring_v3/proto/group_service_pb2_grpc.py deleted file mode 100644 index d681b6c1..00000000 --- a/google/cloud/monitoring_v3/proto/group_service_pb2_grpc.py +++ /dev/null @@ -1,161 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-import grpc - -from google.cloud.monitoring_v3.proto import ( - group_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2, -) -from google.cloud.monitoring_v3.proto import ( - group_service_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class GroupServiceStub(object): - """The Group API lets you inspect and manage your - [groups](#google.monitoring.v3.Group). - - A group is a named filter that is used to identify - a collection of monitored resources. Groups are typically used to - mirror the physical and/or logical topology of the environment. - Because group membership is computed dynamically, monitored - resources that are started in the future are automatically placed - in matching groups. By using a group to name monitored resources in, - for example, an alert policy, the target of that alert policy is - updated automatically as monitored resources are added and removed - from the infrastructure. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.ListGroups = channel.unary_unary( - "/google.monitoring.v3.GroupService/ListGroups", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.ListGroupsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.ListGroupsResponse.FromString, - ) - self.GetGroup = channel.unary_unary( - "/google.monitoring.v3.GroupService/GetGroup", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.GetGroupRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2.Group.FromString, - ) - self.CreateGroup = channel.unary_unary( - "/google.monitoring.v3.GroupService/CreateGroup", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.CreateGroupRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2.Group.FromString, - ) - self.UpdateGroup = channel.unary_unary( - "/google.monitoring.v3.GroupService/UpdateGroup", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.UpdateGroupRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2.Group.FromString, - ) - self.DeleteGroup = channel.unary_unary( - "/google.monitoring.v3.GroupService/DeleteGroup", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.DeleteGroupRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.ListGroupMembers = channel.unary_unary( - "/google.monitoring.v3.GroupService/ListGroupMembers", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.ListGroupMembersRequest.SerializeToString, - 
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.ListGroupMembersResponse.FromString, - ) - - -class GroupServiceServicer(object): - """The Group API lets you inspect and manage your - [groups](#google.monitoring.v3.Group). - - A group is a named filter that is used to identify - a collection of monitored resources. Groups are typically used to - mirror the physical and/or logical topology of the environment. - Because group membership is computed dynamically, monitored - resources that are started in the future are automatically placed - in matching groups. By using a group to name monitored resources in, - for example, an alert policy, the target of that alert policy is - updated automatically as monitored resources are added and removed - from the infrastructure. - """ - - def ListGroups(self, request, context): - """Lists the existing groups. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetGroup(self, request, context): - """Gets a single group. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateGroup(self, request, context): - """Creates a new group. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateGroup(self, request, context): - """Updates an existing group. - You can change any group attributes except `name`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteGroup(self, request, context): - """Deletes an existing group. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListGroupMembers(self, request, context): - """Lists the monitored resources that are members of a group. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_GroupServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - "ListGroups": grpc.unary_unary_rpc_method_handler( - servicer.ListGroups, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.ListGroupsRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.ListGroupsResponse.SerializeToString, - ), - "GetGroup": grpc.unary_unary_rpc_method_handler( - servicer.GetGroup, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.GetGroupRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2.Group.SerializeToString, - ), - "CreateGroup": grpc.unary_unary_rpc_method_handler( - servicer.CreateGroup, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.CreateGroupRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2.Group.SerializeToString, - ), - "UpdateGroup": grpc.unary_unary_rpc_method_handler( - servicer.UpdateGroup, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.UpdateGroupRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2.Group.SerializeToString, - ), - "DeleteGroup": grpc.unary_unary_rpc_method_handler( - servicer.DeleteGroup, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.DeleteGroupRequest.FromString, - 
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "ListGroupMembers": grpc.unary_unary_rpc_method_handler( - servicer.ListGroupMembers, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.ListGroupMembersRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.ListGroupMembersResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.monitoring.v3.GroupService", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/google/cloud/monitoring_v3/proto/metric_pb2.py b/google/cloud/monitoring_v3/proto/metric_pb2.py deleted file mode 100644 index b045d5be..00000000 --- a/google/cloud/monitoring_v3/proto/metric_pb2.py +++ /dev/null @@ -1,1163 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/monitoring_v3/proto/metric.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import distribution_pb2 as google_dot_api_dot_distribution__pb2 -from google.api import label_pb2 as google_dot_api_dot_label__pb2 -from google.api import metric_pb2 as google_dot_api_dot_metric__pb2 -from google.api import ( - monitored_resource_pb2 as google_dot_api_dot_monitored__resource__pb2, -) -from google.cloud.monitoring_v3.proto import ( - common_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2, -) -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/monitoring_v3/proto/metric.proto", - package="google.monitoring.v3", - 
syntax="proto3", - serialized_options=b"\n\030com.google.monitoring.v3B\013MetricProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3\352\002\035Google::Cloud::Monitoring::V3", - serialized_pb=b'\n-google/cloud/monitoring_v3/proto/metric.proto\x12\x14google.monitoring.v3\x1a\x1dgoogle/api/distribution.proto\x1a\x16google/api/label.proto\x1a\x17google/api/metric.proto\x1a#google/api/monitored_resource.proto\x1a-google/cloud/monitoring_v3/proto/common.proto\x1a\x1egoogle/protobuf/duration.proto"n\n\x05Point\x12\x34\n\x08interval\x18\x01 \x01(\x0b\x32".google.monitoring.v3.TimeInterval\x12/\n\x05value\x18\x02 \x01(\x0b\x32 .google.monitoring.v3.TypedValue"\xc1\x02\n\nTimeSeries\x12"\n\x06metric\x18\x01 \x01(\x0b\x32\x12.google.api.Metric\x12/\n\x08resource\x18\x02 \x01(\x0b\x32\x1d.google.api.MonitoredResource\x12\x37\n\x08metadata\x18\x07 \x01(\x0b\x32%.google.api.MonitoredResourceMetadata\x12<\n\x0bmetric_kind\x18\x03 \x01(\x0e\x32\'.google.api.MetricDescriptor.MetricKind\x12:\n\nvalue_type\x18\x04 \x01(\x0e\x32&.google.api.MetricDescriptor.ValueType\x12+\n\x06points\x18\x05 \x03(\x0b\x32\x1b.google.monitoring.v3.Point"\xc0\x02\n\x14TimeSeriesDescriptor\x12\x36\n\x11label_descriptors\x18\x01 \x03(\x0b\x32\x1b.google.api.LabelDescriptor\x12U\n\x11point_descriptors\x18\x05 \x03(\x0b\x32:.google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor\x1a\x98\x01\n\x0fValueDescriptor\x12\x0b\n\x03key\x18\x01 \x01(\t\x12:\n\nvalue_type\x18\x02 \x01(\x0e\x32&.google.api.MetricDescriptor.ValueType\x12<\n\x0bmetric_kind\x18\x03 \x01(\x0e\x32\'.google.api.MetricDescriptor.MetricKind"\x86\x02\n\x0eTimeSeriesData\x12\x36\n\x0clabel_values\x18\x01 \x03(\x0b\x32 .google.monitoring.v3.LabelValue\x12\x42\n\npoint_data\x18\x02 \x03(\x0b\x32..google.monitoring.v3.TimeSeriesData.PointData\x1ax\n\tPointData\x12\x30\n\x06values\x18\x01 \x03(\x0b\x32 
.google.monitoring.v3.TypedValue\x12\x39\n\rtime_interval\x18\x02 \x01(\x0b\x32".google.monitoring.v3.TimeInterval"Z\n\nLabelValue\x12\x14\n\nbool_value\x18\x01 \x01(\x08H\x00\x12\x15\n\x0bint64_value\x18\x02 \x01(\x03H\x00\x12\x16\n\x0cstring_value\x18\x03 \x01(\tH\x00\x42\x07\n\x05value"Q\n\nQueryError\x12\x32\n\x07locator\x18\x01 \x01(\x0b\x32!.google.monitoring.v3.TextLocator\x12\x0f\n\x07message\x18\x02 \x01(\t"\xa0\x02\n\x0bTextLocator\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x42\n\x0estart_position\x18\x02 \x01(\x0b\x32*.google.monitoring.v3.TextLocator.Position\x12@\n\x0c\x65nd_position\x18\x03 \x01(\x0b\x32*.google.monitoring.v3.TextLocator.Position\x12\x39\n\x0enested_locator\x18\x04 \x01(\x0b\x32!.google.monitoring.v3.TextLocator\x12\x16\n\x0enesting_reason\x18\x05 \x01(\t\x1a(\n\x08Position\x12\x0c\n\x04line\x18\x01 \x01(\x05\x12\x0e\n\x06\x63olumn\x18\x02 \x01(\x05\x42\xc3\x01\n\x18\x63om.google.monitoring.v3B\x0bMetricProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3\xea\x02\x1dGoogle::Cloud::Monitoring::V3b\x06proto3', - dependencies=[ - google_dot_api_dot_distribution__pb2.DESCRIPTOR, - google_dot_api_dot_label__pb2.DESCRIPTOR, - google_dot_api_dot_metric__pb2.DESCRIPTOR, - google_dot_api_dot_monitored__resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - ], -) - - -_POINT = _descriptor.Descriptor( - name="Point", - full_name="google.monitoring.v3.Point", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="interval", - full_name="google.monitoring.v3.Point.interval", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.monitoring.v3.Point.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=267, - serialized_end=377, -) - - -_TIMESERIES = _descriptor.Descriptor( - name="TimeSeries", - full_name="google.monitoring.v3.TimeSeries", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="metric", - full_name="google.monitoring.v3.TimeSeries.metric", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="resource", - full_name="google.monitoring.v3.TimeSeries.resource", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="metadata", - full_name="google.monitoring.v3.TimeSeries.metadata", - index=2, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="metric_kind", - 
full_name="google.monitoring.v3.TimeSeries.metric_kind", - index=3, - number=3, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value_type", - full_name="google.monitoring.v3.TimeSeries.value_type", - index=4, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="points", - full_name="google.monitoring.v3.TimeSeries.points", - index=5, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=380, - serialized_end=701, -) - - -_TIMESERIESDESCRIPTOR_VALUEDESCRIPTOR = _descriptor.Descriptor( - name="ValueDescriptor", - full_name="google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value_type", - 
full_name="google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor.value_type", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="metric_kind", - full_name="google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor.metric_kind", - index=2, - number=3, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=872, - serialized_end=1024, -) - -_TIMESERIESDESCRIPTOR = _descriptor.Descriptor( - name="TimeSeriesDescriptor", - full_name="google.monitoring.v3.TimeSeriesDescriptor", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="label_descriptors", - full_name="google.monitoring.v3.TimeSeriesDescriptor.label_descriptors", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="point_descriptors", - full_name="google.monitoring.v3.TimeSeriesDescriptor.point_descriptors", - index=1, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - 
nested_types=[_TIMESERIESDESCRIPTOR_VALUEDESCRIPTOR], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=704, - serialized_end=1024, -) - - -_TIMESERIESDATA_POINTDATA = _descriptor.Descriptor( - name="PointData", - full_name="google.monitoring.v3.TimeSeriesData.PointData", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="values", - full_name="google.monitoring.v3.TimeSeriesData.PointData.values", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="time_interval", - full_name="google.monitoring.v3.TimeSeriesData.PointData.time_interval", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1169, - serialized_end=1289, -) - -_TIMESERIESDATA = _descriptor.Descriptor( - name="TimeSeriesData", - full_name="google.monitoring.v3.TimeSeriesData", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="label_values", - full_name="google.monitoring.v3.TimeSeriesData.label_values", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - 
file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="point_data", - full_name="google.monitoring.v3.TimeSeriesData.point_data", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_TIMESERIESDATA_POINTDATA], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1027, - serialized_end=1289, -) - - -_LABELVALUE = _descriptor.Descriptor( - name="LabelValue", - full_name="google.monitoring.v3.LabelValue", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="bool_value", - full_name="google.monitoring.v3.LabelValue.bool_value", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="int64_value", - full_name="google.monitoring.v3.LabelValue.int64_value", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="string_value", - full_name="google.monitoring.v3.LabelValue.string_value", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - 
nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="value", - full_name="google.monitoring.v3.LabelValue.value", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=1291, - serialized_end=1381, -) - - -_QUERYERROR = _descriptor.Descriptor( - name="QueryError", - full_name="google.monitoring.v3.QueryError", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="locator", - full_name="google.monitoring.v3.QueryError.locator", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="message", - full_name="google.monitoring.v3.QueryError.message", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1383, - serialized_end=1464, -) - - -_TEXTLOCATOR_POSITION = _descriptor.Descriptor( - name="Position", - full_name="google.monitoring.v3.TextLocator.Position", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="line", - full_name="google.monitoring.v3.TextLocator.Position.line", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="column", - full_name="google.monitoring.v3.TextLocator.Position.column", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1715, - serialized_end=1755, -) - -_TEXTLOCATOR = _descriptor.Descriptor( - name="TextLocator", - full_name="google.monitoring.v3.TextLocator", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="source", - full_name="google.monitoring.v3.TextLocator.source", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="start_position", - full_name="google.monitoring.v3.TextLocator.start_position", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_position", - full_name="google.monitoring.v3.TextLocator.end_position", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
), - _descriptor.FieldDescriptor( - name="nested_locator", - full_name="google.monitoring.v3.TextLocator.nested_locator", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="nesting_reason", - full_name="google.monitoring.v3.TextLocator.nesting_reason", - index=4, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_TEXTLOCATOR_POSITION], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1467, - serialized_end=1755, -) - -_POINT.fields_by_name[ - "interval" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2._TIMEINTERVAL -) -_POINT.fields_by_name[ - "value" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2._TYPEDVALUE -) -_TIMESERIES.fields_by_name[ - "metric" -].message_type = google_dot_api_dot_metric__pb2._METRIC -_TIMESERIES.fields_by_name[ - "resource" -].message_type = google_dot_api_dot_monitored__resource__pb2._MONITOREDRESOURCE -_TIMESERIES.fields_by_name[ - "metadata" -].message_type = google_dot_api_dot_monitored__resource__pb2._MONITOREDRESOURCEMETADATA -_TIMESERIES.fields_by_name[ - "metric_kind" -].enum_type = google_dot_api_dot_metric__pb2._METRICDESCRIPTOR_METRICKIND -_TIMESERIES.fields_by_name[ - "value_type" -].enum_type = google_dot_api_dot_metric__pb2._METRICDESCRIPTOR_VALUETYPE -_TIMESERIES.fields_by_name["points"].message_type = _POINT 
-_TIMESERIESDESCRIPTOR_VALUEDESCRIPTOR.fields_by_name[ - "value_type" -].enum_type = google_dot_api_dot_metric__pb2._METRICDESCRIPTOR_VALUETYPE -_TIMESERIESDESCRIPTOR_VALUEDESCRIPTOR.fields_by_name[ - "metric_kind" -].enum_type = google_dot_api_dot_metric__pb2._METRICDESCRIPTOR_METRICKIND -_TIMESERIESDESCRIPTOR_VALUEDESCRIPTOR.containing_type = _TIMESERIESDESCRIPTOR -_TIMESERIESDESCRIPTOR.fields_by_name[ - "label_descriptors" -].message_type = google_dot_api_dot_label__pb2._LABELDESCRIPTOR -_TIMESERIESDESCRIPTOR.fields_by_name[ - "point_descriptors" -].message_type = _TIMESERIESDESCRIPTOR_VALUEDESCRIPTOR -_TIMESERIESDATA_POINTDATA.fields_by_name[ - "values" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2._TYPEDVALUE -) -_TIMESERIESDATA_POINTDATA.fields_by_name[ - "time_interval" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2._TIMEINTERVAL -) -_TIMESERIESDATA_POINTDATA.containing_type = _TIMESERIESDATA -_TIMESERIESDATA.fields_by_name["label_values"].message_type = _LABELVALUE -_TIMESERIESDATA.fields_by_name["point_data"].message_type = _TIMESERIESDATA_POINTDATA -_LABELVALUE.oneofs_by_name["value"].fields.append( - _LABELVALUE.fields_by_name["bool_value"] -) -_LABELVALUE.fields_by_name["bool_value"].containing_oneof = _LABELVALUE.oneofs_by_name[ - "value" -] -_LABELVALUE.oneofs_by_name["value"].fields.append( - _LABELVALUE.fields_by_name["int64_value"] -) -_LABELVALUE.fields_by_name["int64_value"].containing_oneof = _LABELVALUE.oneofs_by_name[ - "value" -] -_LABELVALUE.oneofs_by_name["value"].fields.append( - _LABELVALUE.fields_by_name["string_value"] -) -_LABELVALUE.fields_by_name[ - "string_value" -].containing_oneof = _LABELVALUE.oneofs_by_name["value"] -_QUERYERROR.fields_by_name["locator"].message_type = _TEXTLOCATOR -_TEXTLOCATOR_POSITION.containing_type = _TEXTLOCATOR -_TEXTLOCATOR.fields_by_name["start_position"].message_type = _TEXTLOCATOR_POSITION 
-_TEXTLOCATOR.fields_by_name["end_position"].message_type = _TEXTLOCATOR_POSITION -_TEXTLOCATOR.fields_by_name["nested_locator"].message_type = _TEXTLOCATOR -DESCRIPTOR.message_types_by_name["Point"] = _POINT -DESCRIPTOR.message_types_by_name["TimeSeries"] = _TIMESERIES -DESCRIPTOR.message_types_by_name["TimeSeriesDescriptor"] = _TIMESERIESDESCRIPTOR -DESCRIPTOR.message_types_by_name["TimeSeriesData"] = _TIMESERIESDATA -DESCRIPTOR.message_types_by_name["LabelValue"] = _LABELVALUE -DESCRIPTOR.message_types_by_name["QueryError"] = _QUERYERROR -DESCRIPTOR.message_types_by_name["TextLocator"] = _TEXTLOCATOR -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Point = _reflection.GeneratedProtocolMessageType( - "Point", - (_message.Message,), - { - "DESCRIPTOR": _POINT, - "__module__": "google.cloud.monitoring_v3.proto.metric_pb2", - "__doc__": """A single data point in a time series. - - - Attributes: - interval: - The time interval to which the data point applies. For - ``GAUGE`` metrics, the start time is optional, but if it is - supplied, it must equal the end time. For ``DELTA`` metrics, - the start and end time should specify a non-zero interval, - with subsequent points specifying contiguous and non- - overlapping intervals. For ``CUMULATIVE`` metrics, the start - and end time should specify a non-zero interval, with - subsequent points specifying the same start time and - increasing end times, until an event resets the cumulative - value to zero and sets a new start time for the following - points. - value: - The value of the data point. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.Point) - }, -) -_sym_db.RegisterMessage(Point) - -TimeSeries = _reflection.GeneratedProtocolMessageType( - "TimeSeries", - (_message.Message,), - { - "DESCRIPTOR": _TIMESERIES, - "__module__": "google.cloud.monitoring_v3.proto.metric_pb2", - "__doc__": """A collection of data points that describes the - time-varying values of a metric. 
A time series is identified by a - combination of a fully-specified monitored resource and a - fully-specified metric. This type is used for both listing and creating - time series. - - - Attributes: - metric: - The associated metric. A fully-specified metric used to - identify the time series. - resource: - The associated monitored resource. Custom metrics can use only - certain monitored resource types in their time series data. - metadata: - Output only. The associated monitored resource metadata. When - reading a a timeseries, this field will include metadata - labels that are explicitly named in the reduction. When - creating a timeseries, this field is ignored. - metric_kind: - The metric kind of the time series. When listing time series, - this metric kind might be different from the metric kind of - the associated metric if this time series is an alignment or - reduction of other time series. When creating a time series, - this field is optional. If present, it must be the same as the - metric kind of the associated metric. If the associated - metric’s descriptor must be auto-created, then this field - specifies the metric kind of the new descriptor and must be - either ``GAUGE`` (the default) or ``CUMULATIVE``. - value_type: - The value type of the time series. When listing time series, - this value type might be different from the value type of the - associated metric if this time series is an alignment or - reduction of other time series. When creating a time series, - this field is optional. If present, it must be the same as the - type of the data in the ``points`` field. - points: - The data points of this time series. When listing time series, - points are returned in reverse time order. When creating a - time series, this field must contain exactly one point and the - point’s type must be the same as the value type of the - associated metric. 
If the associated metric’s descriptor must - be auto-created, then the value type of the descriptor is - determined by the point’s type, which must be ``BOOL``, - ``INT64``, ``DOUBLE``, or ``DISTRIBUTION``. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.TimeSeries) - }, -) -_sym_db.RegisterMessage(TimeSeries) - -TimeSeriesDescriptor = _reflection.GeneratedProtocolMessageType( - "TimeSeriesDescriptor", - (_message.Message,), - { - "ValueDescriptor": _reflection.GeneratedProtocolMessageType( - "ValueDescriptor", - (_message.Message,), - { - "DESCRIPTOR": _TIMESERIESDESCRIPTOR_VALUEDESCRIPTOR, - "__module__": "google.cloud.monitoring_v3.proto.metric_pb2", - "__doc__": """A descriptor for the value columns in a data point. - - - Attributes: - key: - The value key. - value_type: - The value type. - metric_kind: - The value stream kind. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor) - }, - ), - "DESCRIPTOR": _TIMESERIESDESCRIPTOR, - "__module__": "google.cloud.monitoring_v3.proto.metric_pb2", - "__doc__": """A descriptor for the labels and points in a timeseries. - - - Attributes: - label_descriptors: - Descriptors for the labels. - point_descriptors: - Descriptors for the point data value columns. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.TimeSeriesDescriptor) - }, -) -_sym_db.RegisterMessage(TimeSeriesDescriptor) -_sym_db.RegisterMessage(TimeSeriesDescriptor.ValueDescriptor) - -TimeSeriesData = _reflection.GeneratedProtocolMessageType( - "TimeSeriesData", - (_message.Message,), - { - "PointData": _reflection.GeneratedProtocolMessageType( - "PointData", - (_message.Message,), - { - "DESCRIPTOR": _TIMESERIESDATA_POINTDATA, - "__module__": "google.cloud.monitoring_v3.proto.metric_pb2", - "__doc__": """A point’s value columns and time interval. 
Each point has - one or more point values corresponding to the entries in - ``point_descriptors`` field in the TimeSeriesDescriptor associated with - this object. - - - Attributes: - values: - The values that make up the point. - time_interval: - The time interval associated with the point. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.TimeSeriesData.PointData) - }, - ), - "DESCRIPTOR": _TIMESERIESDATA, - "__module__": "google.cloud.monitoring_v3.proto.metric_pb2", - "__doc__": """Represents the values of a time series associated with a - TimeSeriesDescriptor. - - - Attributes: - label_values: - The values of the labels in the time series identifier, given - in the same order as the ``label_descriptors`` field of the - TimeSeriesDescriptor associated with this object. Each value - must have a value of the type given in the corresponding entry - of ``label_descriptors``. - point_data: - The points in the time series. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.TimeSeriesData) - }, -) -_sym_db.RegisterMessage(TimeSeriesData) -_sym_db.RegisterMessage(TimeSeriesData.PointData) - -LabelValue = _reflection.GeneratedProtocolMessageType( - "LabelValue", - (_message.Message,), - { - "DESCRIPTOR": _LABELVALUE, - "__module__": "google.cloud.monitoring_v3.proto.metric_pb2", - "__doc__": """A label value. - - - Attributes: - value: - The label value can be a bool, int64, or string. - bool_value: - A bool label value. - int64_value: - An int64 label value. - string_value: - A string label value. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.LabelValue) - }, -) -_sym_db.RegisterMessage(LabelValue) - -QueryError = _reflection.GeneratedProtocolMessageType( - "QueryError", - (_message.Message,), - { - "DESCRIPTOR": _QUERYERROR, - "__module__": "google.cloud.monitoring_v3.proto.metric_pb2", - "__doc__": """An error associated with a query in the time series query - language format. 
- - - Attributes: - locator: - The location of the time series query language text that this - error applies to. - message: - The error message. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.QueryError) - }, -) -_sym_db.RegisterMessage(QueryError) - -TextLocator = _reflection.GeneratedProtocolMessageType( - "TextLocator", - (_message.Message,), - { - "Position": _reflection.GeneratedProtocolMessageType( - "Position", - (_message.Message,), - { - "DESCRIPTOR": _TEXTLOCATOR_POSITION, - "__module__": "google.cloud.monitoring_v3.proto.metric_pb2", - "__doc__": """The position of a byte within the text. - - - Attributes: - line: - The line, starting with 1, where the byte is positioned. - column: - The column within the line, starting with 1, where the byte is - positioned. This is a byte index even though the text is - UTF-8. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.TextLocator.Position) - }, - ), - "DESCRIPTOR": _TEXTLOCATOR, - "__module__": "google.cloud.monitoring_v3.proto.metric_pb2", - "__doc__": """A locator for text. Indicates a particular part of the - text of a request or of an object referenced in the request. - - For example, suppose the request field ``text`` contains: - - text: “The quick brown fox jumps over the lazy dog.” - - Then the locator: - - source: “text” start_position { line: 1 column: 17 } end_position { - line: 1 column: 19 } - - refers to the part of the text: “fox”. - - - Attributes: - source: - The source of the text. The source may be a field in the - request, in which case its format is the format of the - google.rpc.BadRequest.FieldViolation.field field in - https://cloud.google.com/apis/design/errors#error_details. It - may also be be a source other than the request field (e.g. a - macro definition referenced in the text of the query), in - which case this is the name of the source (e.g. the macro - name). - start_position: - The position of the first byte within the text. 
- end_position: - The position of the last byte within the text. - nested_locator: - If ``source``, ``start_position``, and ``end_position`` - describe a call on some object (e.g. a macro in the time - series query language text) and a location is to be designated - in that object’s text, ``nested_locator`` identifies the - location within that object. - nesting_reason: - When ``nested_locator`` is set, this field gives the reason - for the nesting. Usually, the reason is a macro invocation. In - that case, the macro name (including the leading ‘@’) signals - the location of the macro call in the text and a macro - argument name (including the leading ‘$’) signals the location - of the macro argument inside the macro body that got - substituted away. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.TextLocator) - }, -) -_sym_db.RegisterMessage(TextLocator) -_sym_db.RegisterMessage(TextLocator.Position) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/monitoring_v3/proto/metric_pb2_grpc.py b/google/cloud/monitoring_v3/proto/metric_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/monitoring_v3/proto/metric_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/monitoring_v3/proto/metric_service_pb2.py b/google/cloud/monitoring_v3/proto/metric_service_pb2.py deleted file mode 100644 index 19a5cc2c..00000000 --- a/google/cloud/monitoring_v3/proto/metric_service_pb2.py +++ /dev/null @@ -1,1955 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/monitoring_v3/proto/metric_service.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import metric_pb2 as google_dot_api_dot_metric__pb2 -from google.api import ( - monitored_resource_pb2 as google_dot_api_dot_monitored__resource__pb2, -) -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.monitoring_v3.proto import ( - alert_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2, -) -from google.cloud.monitoring_v3.proto import ( - common_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2, -) -from google.cloud.monitoring_v3.proto import ( - metric_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__pb2, -) -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/monitoring_v3/proto/metric_service.proto", - package="google.monitoring.v3", - syntax="proto3", - 
serialized_options=b"\n\030com.google.monitoring.v3B\022MetricServiceProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3\352\002\035Google::Cloud::Monitoring::V3\352A\360\001\n*monitoring.googleapis.com/MetricDescriptor\022;projects/{project}/metricDescriptors/{metric_descriptor=**}\022Eorganizations/{organization}/metricDescriptors/{metric_descriptor=**}\0229folders/{folder}/metricDescriptors/{metric_descriptor=**}\022\001* \001\352A\267\002\n5monitoring.googleapis.com/MonitoredResourceDescriptor\022Oprojects/{project}/monitoredResourceDescriptors/{monitored_resource_descriptor}\022Yorganizations/{organization}/monitoredResourceDescriptors/{monitored_resource_descriptor}\022Mfolders/{folder}/monitoredResourceDescriptors/{monitored_resource_descriptor}\022\001* \001", - serialized_pb=b'\n5google/cloud/monitoring_v3/proto/metric_service.proto\x12\x14google.monitoring.v3\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x17google/api/metric.proto\x1a#google/api/monitored_resource.proto\x1a\x19google/api/resource.proto\x1a,google/cloud/monitoring_v3/proto/alert.proto\x1a-google/cloud/monitoring_v3/proto/common.proto\x1a-google/cloud/monitoring_v3/proto/metric.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x17google/rpc/status.proto"\xad\x01\n\'ListMonitoredResourceDescriptorsRequest\x12K\n\x04name\x18\x05 \x01(\tB=\xe0\x41\x02\xfa\x41\x37\x12\x35monitoring.googleapis.com/MonitoredResourceDescriptor\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t"\x8a\x01\n(ListMonitoredResourceDescriptorsResponse\x12\x45\n\x14resource_descriptors\x18\x01 \x03(\x0b\x32\'.google.api.MonitoredResourceDescriptor\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"t\n%GetMonitoredResourceDescriptorRequest\x12K\n\x04name\x18\x03 
\x01(\tB=\xe0\x41\x02\xfa\x41\x37\n5monitoring.googleapis.com/MonitoredResourceDescriptor"\x97\x01\n\x1cListMetricDescriptorsRequest\x12@\n\x04name\x18\x05 \x01(\tB2\xe0\x41\x02\xfa\x41,\x12*monitoring.googleapis.com/MetricDescriptor\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t"r\n\x1dListMetricDescriptorsResponse\x12\x38\n\x12metric_descriptors\x18\x01 \x03(\x0b\x32\x1c.google.api.MetricDescriptor\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"^\n\x1aGetMetricDescriptorRequest\x12@\n\x04name\x18\x03 \x01(\tB2\xe0\x41\x02\xfa\x41,\n*monitoring.googleapis.com/MetricDescriptor"\x9f\x01\n\x1d\x43reateMetricDescriptorRequest\x12@\n\x04name\x18\x03 \x01(\tB2\xe0\x41\x02\xfa\x41,\x12*monitoring.googleapis.com/MetricDescriptor\x12<\n\x11metric_descriptor\x18\x02 \x01(\x0b\x32\x1c.google.api.MetricDescriptorB\x03\xe0\x41\x02"a\n\x1d\x44\x65leteMetricDescriptorRequest\x12@\n\x04name\x18\x03 \x01(\tB2\xe0\x41\x02\xfa\x41,\n*monitoring.googleapis.com/MetricDescriptor"\x93\x03\n\x15ListTimeSeriesRequest\x12\x41\n\x04name\x18\n \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x13\n\x06\x66ilter\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x08interval\x18\x04 \x01(\x0b\x32".google.monitoring.v3.TimeIntervalB\x03\xe0\x41\x02\x12\x36\n\x0b\x61ggregation\x18\x05 \x01(\x0b\x32!.google.monitoring.v3.Aggregation\x12\x10\n\x08order_by\x18\x06 \x01(\t\x12M\n\x04view\x18\x07 \x01(\x0e\x32:.google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesViewB\x03\xe0\x41\x02\x12\x11\n\tpage_size\x18\x08 \x01(\x05\x12\x12\n\npage_token\x18\t \x01(\t"\'\n\x0eTimeSeriesView\x12\x08\n\x04\x46ULL\x10\x00\x12\x0b\n\x07HEADERS\x10\x01"\x96\x01\n\x16ListTimeSeriesResponse\x12\x35\n\x0btime_series\x18\x01 \x03(\x0b\x32 .google.monitoring.v3.TimeSeries\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12,\n\x10\x65xecution_errors\x18\x03 
\x03(\x0b\x32\x12.google.rpc.Status"\x98\x01\n\x17\x43reateTimeSeriesRequest\x12\x41\n\x04name\x18\x03 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12:\n\x0btime_series\x18\x02 \x03(\x0b\x32 .google.monitoring.v3.TimeSeriesB\x03\xe0\x41\x02"z\n\x15\x43reateTimeSeriesError\x12\x39\n\x0btime_series\x18\x01 \x01(\x0b\x32 .google.monitoring.v3.TimeSeriesB\x02\x18\x01\x12&\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.StatusB\x02\x18\x01"\xd8\x01\n\x17\x43reateTimeSeriesSummary\x12\x19\n\x11total_point_count\x18\x01 \x01(\x05\x12\x1b\n\x13success_point_count\x18\x02 \x01(\x05\x12\x43\n\x06\x65rrors\x18\x03 \x03(\x0b\x32\x33.google.monitoring.v3.CreateTimeSeriesSummary.Error\x1a@\n\x05\x45rror\x12"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x13\n\x0bpoint_count\x18\x02 \x01(\x05"\\\n\x16QueryTimeSeriesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05query\x18\x07 \x01(\t\x12\x11\n\tpage_size\x18\t \x01(\x05\x12\x12\n\npage_token\x18\n \x01(\t"\xea\x01\n\x17QueryTimeSeriesResponse\x12J\n\x16time_series_descriptor\x18\x08 \x01(\x0b\x32*.google.monitoring.v3.TimeSeriesDescriptor\x12>\n\x10time_series_data\x18\t \x03(\x0b\x32$.google.monitoring.v3.TimeSeriesData\x12\x17\n\x0fnext_page_token\x18\n \x01(\t\x12*\n\x0epartial_errors\x18\x0b \x03(\x0b\x32\x12.google.rpc.Status"Y\n\x0eQueryErrorList\x12\x30\n\x06\x65rrors\x18\x01 \x03(\x0b\x32 .google.monitoring.v3.QueryError\x12\x15\n\rerror_summary\x18\x02 \x01(\t2\xbe\r\n\rMetricService\x12\xe4\x01\n 
ListMonitoredResourceDescriptors\x12=.google.monitoring.v3.ListMonitoredResourceDescriptorsRequest\x1a>.google.monitoring.v3.ListMonitoredResourceDescriptorsResponse"A\x82\xd3\xe4\x93\x02\x34\x12\x32/v3/{name=projects/*}/monitoredResourceDescriptors\xda\x41\x04name\x12\xcc\x01\n\x1eGetMonitoredResourceDescriptor\x12;.google.monitoring.v3.GetMonitoredResourceDescriptorRequest\x1a\'.google.api.MonitoredResourceDescriptor"D\x82\xd3\xe4\x93\x02\x37\x12\x35/v3/{name=projects/*/monitoredResourceDescriptors/**}\xda\x41\x04name\x12\xb8\x01\n\x15ListMetricDescriptors\x12\x32.google.monitoring.v3.ListMetricDescriptorsRequest\x1a\x33.google.monitoring.v3.ListMetricDescriptorsResponse"6\x82\xd3\xe4\x93\x02)\x12\'/v3/{name=projects/*}/metricDescriptors\xda\x41\x04name\x12\xa0\x01\n\x13GetMetricDescriptor\x12\x30.google.monitoring.v3.GetMetricDescriptorRequest\x1a\x1c.google.api.MetricDescriptor"9\x82\xd3\xe4\x93\x02,\x12*/v3/{name=projects/*/metricDescriptors/**}\xda\x41\x04name\x12\xc8\x01\n\x16\x43reateMetricDescriptor\x12\x33.google.monitoring.v3.CreateMetricDescriptorRequest\x1a\x1c.google.api.MetricDescriptor"[\x82\xd3\xe4\x93\x02<"\'/v3/{name=projects/*}/metricDescriptors:\x11metric_descriptor\xda\x41\x16name,metric_descriptor\x12\xa0\x01\n\x16\x44\x65leteMetricDescriptor\x12\x33.google.monitoring.v3.DeleteMetricDescriptorRequest\x1a\x16.google.protobuf.Empty"9\x82\xd3\xe4\x93\x02,**/v3/{name=projects/*/metricDescriptors/**}\xda\x41\x04name\x12\xb1\x01\n\x0eListTimeSeries\x12+.google.monitoring.v3.ListTimeSeriesRequest\x1a,.google.monitoring.v3.ListTimeSeriesResponse"D\x82\xd3\xe4\x93\x02"\x12 /v3/{name=projects/*}/timeSeries\xda\x41\x19name,filter,interval,view\x12\x99\x01\n\x10\x43reateTimeSeries\x12-.google.monitoring.v3.CreateTimeSeriesRequest\x1a\x16.google.protobuf.Empty">\x82\xd3\xe4\x93\x02%" 
/v3/{name=projects/*}/timeSeries:\x01*\xda\x41\x10name,time_series\x1a\xda\x01\xca\x41\x19monitoring.googleapis.com\xd2\x41\xba\x01https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/monitoring,https://www.googleapis.com/auth/monitoring.read,https://www.googleapis.com/auth/monitoring.writeB\xf9\x05\n\x18\x63om.google.monitoring.v3B\x12MetricServiceProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3\xea\x02\x1dGoogle::Cloud::Monitoring::V3\xea\x41\xf0\x01\n*monitoring.googleapis.com/MetricDescriptor\x12;projects/{project}/metricDescriptors/{metric_descriptor=**}\x12\x45organizations/{organization}/metricDescriptors/{metric_descriptor=**}\x12\x39\x66olders/{folder}/metricDescriptors/{metric_descriptor=**}\x12\x01* \x01\xea\x41\xb7\x02\n5monitoring.googleapis.com/MonitoredResourceDescriptor\x12Oprojects/{project}/monitoredResourceDescriptors/{monitored_resource_descriptor}\x12Yorganizations/{organization}/monitoredResourceDescriptors/{monitored_resource_descriptor}\x12Mfolders/{folder}/monitoredResourceDescriptors/{monitored_resource_descriptor}\x12\x01* \x01\x62\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_metric__pb2.DESCRIPTOR, - google_dot_api_dot_monitored__resource__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2.DESCRIPTOR, - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2.DESCRIPTOR, - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_rpc_dot_status__pb2.DESCRIPTOR, - ], -) - - -_LISTTIMESERIESREQUEST_TIMESERIESVIEW = _descriptor.EnumDescriptor( - 
name="TimeSeriesView", - full_name="google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesView", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="FULL", index=0, number=0, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="HEADERS", index=1, number=1, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1909, - serialized_end=1948, -) -_sym_db.RegisterEnumDescriptor(_LISTTIMESERIESREQUEST_TIMESERIESVIEW) - - -_LISTMONITOREDRESOURCEDESCRIPTORSREQUEST = _descriptor.Descriptor( - name="ListMonitoredResourceDescriptorsRequest", - full_name="google.monitoring.v3.ListMonitoredResourceDescriptorsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.ListMonitoredResourceDescriptorsRequest.name", - index=0, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A7\0225monitoring.googleapis.com/MonitoredResourceDescriptor", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.monitoring.v3.ListMonitoredResourceDescriptorsRequest.filter", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.monitoring.v3.ListMonitoredResourceDescriptorsRequest.page_size", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.monitoring.v3.ListMonitoredResourceDescriptorsRequest.page_token", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=483, - serialized_end=656, -) - - -_LISTMONITOREDRESOURCEDESCRIPTORSRESPONSE = _descriptor.Descriptor( - name="ListMonitoredResourceDescriptorsResponse", - full_name="google.monitoring.v3.ListMonitoredResourceDescriptorsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="resource_descriptors", - full_name="google.monitoring.v3.ListMonitoredResourceDescriptorsResponse.resource_descriptors", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.monitoring.v3.ListMonitoredResourceDescriptorsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - 
syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=659, - serialized_end=797, -) - - -_GETMONITOREDRESOURCEDESCRIPTORREQUEST = _descriptor.Descriptor( - name="GetMonitoredResourceDescriptorRequest", - full_name="google.monitoring.v3.GetMonitoredResourceDescriptorRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.GetMonitoredResourceDescriptorRequest.name", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A7\n5monitoring.googleapis.com/MonitoredResourceDescriptor", - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=799, - serialized_end=915, -) - - -_LISTMETRICDESCRIPTORSREQUEST = _descriptor.Descriptor( - name="ListMetricDescriptorsRequest", - full_name="google.monitoring.v3.ListMetricDescriptorsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.ListMetricDescriptorsRequest.name", - index=0, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A,\022*monitoring.googleapis.com/MetricDescriptor", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.monitoring.v3.ListMetricDescriptorsRequest.filter", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - 
message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.monitoring.v3.ListMetricDescriptorsRequest.page_size", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.monitoring.v3.ListMetricDescriptorsRequest.page_token", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=918, - serialized_end=1069, -) - - -_LISTMETRICDESCRIPTORSRESPONSE = _descriptor.Descriptor( - name="ListMetricDescriptorsResponse", - full_name="google.monitoring.v3.ListMetricDescriptorsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="metric_descriptors", - full_name="google.monitoring.v3.ListMetricDescriptorsResponse.metric_descriptors", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.monitoring.v3.ListMetricDescriptorsResponse.next_page_token", - index=1, - number=2, - type=9, - 
cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1071, - serialized_end=1185, -) - - -_GETMETRICDESCRIPTORREQUEST = _descriptor.Descriptor( - name="GetMetricDescriptorRequest", - full_name="google.monitoring.v3.GetMetricDescriptorRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.GetMetricDescriptorRequest.name", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A,\n*monitoring.googleapis.com/MetricDescriptor", - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1187, - serialized_end=1281, -) - - -_CREATEMETRICDESCRIPTORREQUEST = _descriptor.Descriptor( - name="CreateMetricDescriptorRequest", - full_name="google.monitoring.v3.CreateMetricDescriptorRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.CreateMetricDescriptorRequest.name", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=b"\340A\002\372A,\022*monitoring.googleapis.com/MetricDescriptor", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="metric_descriptor", - full_name="google.monitoring.v3.CreateMetricDescriptorRequest.metric_descriptor", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1284, - serialized_end=1443, -) - - -_DELETEMETRICDESCRIPTORREQUEST = _descriptor.Descriptor( - name="DeleteMetricDescriptorRequest", - full_name="google.monitoring.v3.DeleteMetricDescriptorRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.DeleteMetricDescriptorRequest.name", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A,\n*monitoring.googleapis.com/MetricDescriptor", - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1445, - serialized_end=1542, -) - - -_LISTTIMESERIESREQUEST = _descriptor.Descriptor( - name="ListTimeSeriesRequest", - full_name="google.monitoring.v3.ListTimeSeriesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.ListTimeSeriesRequest.name", - index=0, - 
number=10, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.monitoring.v3.ListTimeSeriesRequest.filter", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="interval", - full_name="google.monitoring.v3.ListTimeSeriesRequest.interval", - index=2, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="aggregation", - full_name="google.monitoring.v3.ListTimeSeriesRequest.aggregation", - index=3, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="order_by", - full_name="google.monitoring.v3.ListTimeSeriesRequest.order_by", - index=4, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="view", - 
full_name="google.monitoring.v3.ListTimeSeriesRequest.view", - index=5, - number=7, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.monitoring.v3.ListTimeSeriesRequest.page_size", - index=6, - number=8, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.monitoring.v3.ListTimeSeriesRequest.page_token", - index=7, - number=9, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_LISTTIMESERIESREQUEST_TIMESERIESVIEW], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1545, - serialized_end=1948, -) - - -_LISTTIMESERIESRESPONSE = _descriptor.Descriptor( - name="ListTimeSeriesResponse", - full_name="google.monitoring.v3.ListTimeSeriesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="time_series", - full_name="google.monitoring.v3.ListTimeSeriesResponse.time_series", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - 
_descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.monitoring.v3.ListTimeSeriesResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="execution_errors", - full_name="google.monitoring.v3.ListTimeSeriesResponse.execution_errors", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1951, - serialized_end=2101, -) - - -_CREATETIMESERIESREQUEST = _descriptor.Descriptor( - name="CreateTimeSeriesRequest", - full_name="google.monitoring.v3.CreateTimeSeriesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.CreateTimeSeriesRequest.name", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="time_series", - full_name="google.monitoring.v3.CreateTimeSeriesRequest.time_series", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2104, - serialized_end=2256, -) - - -_CREATETIMESERIESERROR = _descriptor.Descriptor( - name="CreateTimeSeriesError", - full_name="google.monitoring.v3.CreateTimeSeriesError", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="time_series", - full_name="google.monitoring.v3.CreateTimeSeriesError.time_series", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\030\001", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="status", - full_name="google.monitoring.v3.CreateTimeSeriesError.status", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\030\001", - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2258, - serialized_end=2380, -) - - -_CREATETIMESERIESSUMMARY_ERROR = _descriptor.Descriptor( - name="Error", - full_name="google.monitoring.v3.CreateTimeSeriesSummary.Error", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="status", - full_name="google.monitoring.v3.CreateTimeSeriesSummary.Error.status", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, 
- enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="point_count", - full_name="google.monitoring.v3.CreateTimeSeriesSummary.Error.point_count", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2535, - serialized_end=2599, -) - -_CREATETIMESERIESSUMMARY = _descriptor.Descriptor( - name="CreateTimeSeriesSummary", - full_name="google.monitoring.v3.CreateTimeSeriesSummary", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="total_point_count", - full_name="google.monitoring.v3.CreateTimeSeriesSummary.total_point_count", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="success_point_count", - full_name="google.monitoring.v3.CreateTimeSeriesSummary.success_point_count", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="errors", - full_name="google.monitoring.v3.CreateTimeSeriesSummary.errors", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - 
message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_CREATETIMESERIESSUMMARY_ERROR], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2383, - serialized_end=2599, -) - - -_QUERYTIMESERIESREQUEST = _descriptor.Descriptor( - name="QueryTimeSeriesRequest", - full_name="google.monitoring.v3.QueryTimeSeriesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.QueryTimeSeriesRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="query", - full_name="google.monitoring.v3.QueryTimeSeriesRequest.query", - index=1, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.monitoring.v3.QueryTimeSeriesRequest.page_size", - index=2, - number=9, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.monitoring.v3.QueryTimeSeriesRequest.page_token", - index=3, - number=10, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2601, - serialized_end=2693, -) - - -_QUERYTIMESERIESRESPONSE = _descriptor.Descriptor( - name="QueryTimeSeriesResponse", - full_name="google.monitoring.v3.QueryTimeSeriesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="time_series_descriptor", - full_name="google.monitoring.v3.QueryTimeSeriesResponse.time_series_descriptor", - index=0, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="time_series_data", - full_name="google.monitoring.v3.QueryTimeSeriesResponse.time_series_data", - index=1, - number=9, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.monitoring.v3.QueryTimeSeriesResponse.next_page_token", - index=2, - number=10, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="partial_errors", - full_name="google.monitoring.v3.QueryTimeSeriesResponse.partial_errors", - 
index=3, - number=11, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2696, - serialized_end=2930, -) - - -_QUERYERRORLIST = _descriptor.Descriptor( - name="QueryErrorList", - full_name="google.monitoring.v3.QueryErrorList", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="errors", - full_name="google.monitoring.v3.QueryErrorList.errors", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="error_summary", - full_name="google.monitoring.v3.QueryErrorList.error_summary", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2932, - serialized_end=3021, -) - -_LISTMONITOREDRESOURCEDESCRIPTORSRESPONSE.fields_by_name[ - "resource_descriptors" -].message_type = ( - google_dot_api_dot_monitored__resource__pb2._MONITOREDRESOURCEDESCRIPTOR -) -_LISTMETRICDESCRIPTORSRESPONSE.fields_by_name[ - "metric_descriptors" -].message_type = google_dot_api_dot_metric__pb2._METRICDESCRIPTOR 
-_CREATEMETRICDESCRIPTORREQUEST.fields_by_name[ - "metric_descriptor" -].message_type = google_dot_api_dot_metric__pb2._METRICDESCRIPTOR -_LISTTIMESERIESREQUEST.fields_by_name[ - "interval" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2._TIMEINTERVAL -) -_LISTTIMESERIESREQUEST.fields_by_name[ - "aggregation" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2._AGGREGATION -) -_LISTTIMESERIESREQUEST.fields_by_name[ - "view" -].enum_type = _LISTTIMESERIESREQUEST_TIMESERIESVIEW -_LISTTIMESERIESREQUEST_TIMESERIESVIEW.containing_type = _LISTTIMESERIESREQUEST -_LISTTIMESERIESRESPONSE.fields_by_name[ - "time_series" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__pb2._TIMESERIES -) -_LISTTIMESERIESRESPONSE.fields_by_name[ - "execution_errors" -].message_type = google_dot_rpc_dot_status__pb2._STATUS -_CREATETIMESERIESREQUEST.fields_by_name[ - "time_series" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__pb2._TIMESERIES -) -_CREATETIMESERIESERROR.fields_by_name[ - "time_series" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__pb2._TIMESERIES -) -_CREATETIMESERIESERROR.fields_by_name[ - "status" -].message_type = google_dot_rpc_dot_status__pb2._STATUS -_CREATETIMESERIESSUMMARY_ERROR.fields_by_name[ - "status" -].message_type = google_dot_rpc_dot_status__pb2._STATUS -_CREATETIMESERIESSUMMARY_ERROR.containing_type = _CREATETIMESERIESSUMMARY -_CREATETIMESERIESSUMMARY.fields_by_name[ - "errors" -].message_type = _CREATETIMESERIESSUMMARY_ERROR -_QUERYTIMESERIESRESPONSE.fields_by_name[ - "time_series_descriptor" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__pb2._TIMESERIESDESCRIPTOR -) -_QUERYTIMESERIESRESPONSE.fields_by_name[ - "time_series_data" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__pb2._TIMESERIESDATA -) 
-_QUERYTIMESERIESRESPONSE.fields_by_name[ - "partial_errors" -].message_type = google_dot_rpc_dot_status__pb2._STATUS -_QUERYERRORLIST.fields_by_name[ - "errors" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__pb2._QUERYERROR -) -DESCRIPTOR.message_types_by_name[ - "ListMonitoredResourceDescriptorsRequest" -] = _LISTMONITOREDRESOURCEDESCRIPTORSREQUEST -DESCRIPTOR.message_types_by_name[ - "ListMonitoredResourceDescriptorsResponse" -] = _LISTMONITOREDRESOURCEDESCRIPTORSRESPONSE -DESCRIPTOR.message_types_by_name[ - "GetMonitoredResourceDescriptorRequest" -] = _GETMONITOREDRESOURCEDESCRIPTORREQUEST -DESCRIPTOR.message_types_by_name[ - "ListMetricDescriptorsRequest" -] = _LISTMETRICDESCRIPTORSREQUEST -DESCRIPTOR.message_types_by_name[ - "ListMetricDescriptorsResponse" -] = _LISTMETRICDESCRIPTORSRESPONSE -DESCRIPTOR.message_types_by_name[ - "GetMetricDescriptorRequest" -] = _GETMETRICDESCRIPTORREQUEST -DESCRIPTOR.message_types_by_name[ - "CreateMetricDescriptorRequest" -] = _CREATEMETRICDESCRIPTORREQUEST -DESCRIPTOR.message_types_by_name[ - "DeleteMetricDescriptorRequest" -] = _DELETEMETRICDESCRIPTORREQUEST -DESCRIPTOR.message_types_by_name["ListTimeSeriesRequest"] = _LISTTIMESERIESREQUEST -DESCRIPTOR.message_types_by_name["ListTimeSeriesResponse"] = _LISTTIMESERIESRESPONSE -DESCRIPTOR.message_types_by_name["CreateTimeSeriesRequest"] = _CREATETIMESERIESREQUEST -DESCRIPTOR.message_types_by_name["CreateTimeSeriesError"] = _CREATETIMESERIESERROR -DESCRIPTOR.message_types_by_name["CreateTimeSeriesSummary"] = _CREATETIMESERIESSUMMARY -DESCRIPTOR.message_types_by_name["QueryTimeSeriesRequest"] = _QUERYTIMESERIESREQUEST -DESCRIPTOR.message_types_by_name["QueryTimeSeriesResponse"] = _QUERYTIMESERIESRESPONSE -DESCRIPTOR.message_types_by_name["QueryErrorList"] = _QUERYERRORLIST -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ListMonitoredResourceDescriptorsRequest = _reflection.GeneratedProtocolMessageType( - 
"ListMonitoredResourceDescriptorsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTMONITOREDRESOURCEDESCRIPTORSREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """The ``ListMonitoredResourceDescriptors`` request. - - - Attributes: - name: - Required. The project on which to execute the request. The - format is: :: projects/[PROJECT_ID_OR_NUMBER] - filter: - An optional `filter - `__ - describing the descriptors to be returned. The filter can - reference the descriptor’s type and labels. For example, the - following filter returns only Google Compute Engine - descriptors that have an ``id`` label: :: - - resource.type = starts_with("gce_") AND resource.label:id - page_size: - A positive number that is the maximum number of results to - return. - page_token: - If this field is not empty then it must contain the - ``nextPageToken`` value returned by a previous call to this - method. Using this field causes the method to return - additional results from the previous method call. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListMonitoredResourceDescriptorsRequest) - }, -) -_sym_db.RegisterMessage(ListMonitoredResourceDescriptorsRequest) - -ListMonitoredResourceDescriptorsResponse = _reflection.GeneratedProtocolMessageType( - "ListMonitoredResourceDescriptorsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTMONITOREDRESOURCEDESCRIPTORSRESPONSE, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """The ``ListMonitoredResourceDescriptors`` response. - - - Attributes: - resource_descriptors: - The monitored resource descriptors that are available to this - project and that match ``filter``, if present. - next_page_token: - If there are more results than have been returned, then this - field is set to a non-empty value. To see the additional - results, use that value as ``page_token`` in the next call to - this method. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListMonitoredResourceDescriptorsResponse) - }, -) -_sym_db.RegisterMessage(ListMonitoredResourceDescriptorsResponse) - -GetMonitoredResourceDescriptorRequest = _reflection.GeneratedProtocolMessageType( - "GetMonitoredResourceDescriptorRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETMONITOREDRESOURCEDESCRIPTORREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """The ``GetMonitoredResourceDescriptor`` request. - - - Attributes: - name: - Required. The monitored resource descriptor to get. The format - is: :: projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceD - escriptors/[RESOURCE_TYPE] The ``[RESOURCE_TYPE]`` is a - predefined type, such as ``cloudsql_database``. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.GetMonitoredResourceDescriptorRequest) - }, -) -_sym_db.RegisterMessage(GetMonitoredResourceDescriptorRequest) - -ListMetricDescriptorsRequest = _reflection.GeneratedProtocolMessageType( - "ListMetricDescriptorsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTMETRICDESCRIPTORSREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """The ``ListMetricDescriptors`` request. - - - Attributes: - name: - Required. The project on which to execute the request. The - format is: :: projects/[PROJECT_ID_OR_NUMBER] - filter: - If this field is empty, all custom and system-defined metric - descriptors are returned. Otherwise, the `filter - `__ - specifies which metric descriptors are to be returned. For - example, the following filter matches all `custom metrics - `__: :: - metric.type = starts_with("custom.googleapis.com/") - page_size: - A positive number that is the maximum number of results to - return. - page_token: - If this field is not empty then it must contain the - ``nextPageToken`` value returned by a previous call to this - method. 
Using this field causes the method to return - additional results from the previous method call. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListMetricDescriptorsRequest) - }, -) -_sym_db.RegisterMessage(ListMetricDescriptorsRequest) - -ListMetricDescriptorsResponse = _reflection.GeneratedProtocolMessageType( - "ListMetricDescriptorsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTMETRICDESCRIPTORSRESPONSE, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """The ``ListMetricDescriptors`` response. - - - Attributes: - metric_descriptors: - The metric descriptors that are available to the project and - that match the value of ``filter``, if present. - next_page_token: - If there are more results than have been returned, then this - field is set to a non-empty value. To see the additional - results, use that value as ``page_token`` in the next call to - this method. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListMetricDescriptorsResponse) - }, -) -_sym_db.RegisterMessage(ListMetricDescriptorsResponse) - -GetMetricDescriptorRequest = _reflection.GeneratedProtocolMessageType( - "GetMetricDescriptorRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETMETRICDESCRIPTORREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """The ``GetMetricDescriptor`` request. - - - Attributes: - name: - Required. The metric descriptor on which to execute the - request. The format is: :: - projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] - An example value of ``[METRIC_ID]`` is - ``"compute.googleapis.com/instance/disk/read_bytes_count"``. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.GetMetricDescriptorRequest) - }, -) -_sym_db.RegisterMessage(GetMetricDescriptorRequest) - -CreateMetricDescriptorRequest = _reflection.GeneratedProtocolMessageType( - "CreateMetricDescriptorRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEMETRICDESCRIPTORREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """The ``CreateMetricDescriptor`` request. - - - Attributes: - name: - Required. The project on which to execute the request. The - format is: :: projects/[PROJECT_ID_OR_NUMBER] - metric_descriptor: - Required. The new `custom metric - `__ - descriptor. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.CreateMetricDescriptorRequest) - }, -) -_sym_db.RegisterMessage(CreateMetricDescriptorRequest) - -DeleteMetricDescriptorRequest = _reflection.GeneratedProtocolMessageType( - "DeleteMetricDescriptorRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEMETRICDESCRIPTORREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """The ``DeleteMetricDescriptor`` request. - - - Attributes: - name: - Required. The metric descriptor on which to execute the - request. The format is: :: - projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] - An example of ``[METRIC_ID]`` is: - ``"custom.googleapis.com/my_test_metric"``. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.DeleteMetricDescriptorRequest) - }, -) -_sym_db.RegisterMessage(DeleteMetricDescriptorRequest) - -ListTimeSeriesRequest = _reflection.GeneratedProtocolMessageType( - "ListTimeSeriesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTTIMESERIESREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """The ``ListTimeSeries`` request. - - - Attributes: - name: - Required. The project on which to execute the request. 
The - format is: :: projects/[PROJECT_ID_OR_NUMBER] - filter: - Required. A `monitoring filter - `__ that - specifies which time series should be returned. The filter - must specify a single metric type, and can additionally - specify metric labels and other information. For example: :: - metric.type = "compute.googleapis.com/instance/cpu/usage_time" - AND metric.labels.instance_name = "my-instance-name" - interval: - Required. The time interval for which results should be - returned. Only time series that contain data points in the - specified interval are included in the response. - aggregation: - Specifies the alignment of data points in individual time - series as well as how to combine the retrieved time series - across specified labels. By default (if no ``aggregation`` is - explicitly specified), the raw time series data is returned. - order_by: - Unsupported: must be left blank. The points in each time - series are currently returned in reverse time order (most - recent to oldest). - view: - Required. Specifies which information is returned about the - time series. - page_size: - A positive number that is the maximum number of results to - return. If ``page_size`` is empty or more than 100,000 - results, the effective ``page_size`` is 100,000 results. If - ``view`` is set to ``FULL``, this is the maximum number of - ``Points`` returned. If ``view`` is set to ``HEADERS``, this - is the maximum number of ``TimeSeries`` returned. - page_token: - If this field is not empty then it must contain the - ``nextPageToken`` value returned by a previous call to this - method. Using this field causes the method to return - additional results from the previous method call. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListTimeSeriesRequest) - }, -) -_sym_db.RegisterMessage(ListTimeSeriesRequest) - -ListTimeSeriesResponse = _reflection.GeneratedProtocolMessageType( - "ListTimeSeriesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTTIMESERIESRESPONSE, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """The ``ListTimeSeries`` response. - - - Attributes: - time_series: - One or more time series that match the filter included in the - request. - next_page_token: - If there are more results than have been returned, then this - field is set to a non-empty value. To see the additional - results, use that value as ``page_token`` in the next call to - this method. - execution_errors: - Query execution errors that may have caused the time series - data returned to be incomplete. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListTimeSeriesResponse) - }, -) -_sym_db.RegisterMessage(ListTimeSeriesResponse) - -CreateTimeSeriesRequest = _reflection.GeneratedProtocolMessageType( - "CreateTimeSeriesRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATETIMESERIESREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """The ``CreateTimeSeries`` request. - - - Attributes: - name: - Required. The project on which to execute the request. The - format is: :: projects/[PROJECT_ID_OR_NUMBER] - time_series: - Required. The new data to be added to a list of time series. - Adds at most one data point to each of several time series. - The new data point must be more recent than any other point in - its time series. Each ``TimeSeries`` value must fully specify - a unique time series by supplying all label values for the - metric and the monitored resource. The maximum number of - ``TimeSeries`` objects per ``Create`` request is 200. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.CreateTimeSeriesRequest) - }, -) -_sym_db.RegisterMessage(CreateTimeSeriesRequest) - -CreateTimeSeriesError = _reflection.GeneratedProtocolMessageType( - "CreateTimeSeriesError", - (_message.Message,), - { - "DESCRIPTOR": _CREATETIMESERIESERROR, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """DEPRECATED. Used to hold per-time-series error status. - - - Attributes: - time_series: - DEPRECATED. Time series ID that resulted in the ``status`` - error. - status: - DEPRECATED. The status of the requested write operation for - ``time_series``. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.CreateTimeSeriesError) - }, -) -_sym_db.RegisterMessage(CreateTimeSeriesError) - -CreateTimeSeriesSummary = _reflection.GeneratedProtocolMessageType( - "CreateTimeSeriesSummary", - (_message.Message,), - { - "Error": _reflection.GeneratedProtocolMessageType( - "Error", - (_message.Message,), - { - "DESCRIPTOR": _CREATETIMESERIESSUMMARY_ERROR, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """Detailed information about an error category. - - - Attributes: - status: - The status of the requested write operation. - point_count: - The number of points that couldn’t be written because of - ``status``. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.CreateTimeSeriesSummary.Error) - }, - ), - "DESCRIPTOR": _CREATETIMESERIESSUMMARY, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """Summary of the result of a failed request to write data to - a time series. - - - Attributes: - total_point_count: - The number of points in the request. - success_point_count: - The number of points that were successfully written. - errors: - The number of points that failed to be written. Order is not - guaranteed. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.CreateTimeSeriesSummary) - }, -) -_sym_db.RegisterMessage(CreateTimeSeriesSummary) -_sym_db.RegisterMessage(CreateTimeSeriesSummary.Error) - -QueryTimeSeriesRequest = _reflection.GeneratedProtocolMessageType( - "QueryTimeSeriesRequest", - (_message.Message,), - { - "DESCRIPTOR": _QUERYTIMESERIESREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """The ``QueryTimeSeries`` request. - - - Attributes: - name: - Required. The project on which to execute the request. The - format is: :: projects/[PROJECT_ID_OR_NUMBER] - query: - Required. The query in the monitoring query language format. - The default time zone is in UTC. - page_size: - A positive number that is the maximum number of - time_series_data to return. - page_token: - If this field is not empty then it must contain the - ``nextPageToken`` value returned by a previous call to this - method. Using this field causes the method to return - additional results from the previous method call. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.QueryTimeSeriesRequest) - }, -) -_sym_db.RegisterMessage(QueryTimeSeriesRequest) - -QueryTimeSeriesResponse = _reflection.GeneratedProtocolMessageType( - "QueryTimeSeriesResponse", - (_message.Message,), - { - "DESCRIPTOR": _QUERYTIMESERIESRESPONSE, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """The ``QueryTimeSeries`` response. - - - Attributes: - time_series_descriptor: - The descriptor for the time series data. - time_series_data: - The time series data. - next_page_token: - If there are more results than have been returned, then this - field is set to a non-empty value. To see the additional - results, use that value as ``page_token`` in the next call to - this method. - partial_errors: - Query execution errors that may have caused the time series - data returned to be incomplete. 
The available data will be - available in the response. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.QueryTimeSeriesResponse) - }, -) -_sym_db.RegisterMessage(QueryTimeSeriesResponse) - -QueryErrorList = _reflection.GeneratedProtocolMessageType( - "QueryErrorList", - (_message.Message,), - { - "DESCRIPTOR": _QUERYERRORLIST, - "__module__": "google.cloud.monitoring_v3.proto.metric_service_pb2", - "__doc__": """This is an error detail intended to be used with - INVALID_ARGUMENT errors. - - - Attributes: - errors: - Errors in parsing the time series query language text. The - number of errors in the response may be limited. - error_summary: - A summary of all the errors. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.QueryErrorList) - }, -) -_sym_db.RegisterMessage(QueryErrorList) - - -DESCRIPTOR._options = None -_LISTMONITOREDRESOURCEDESCRIPTORSREQUEST.fields_by_name["name"]._options = None -_GETMONITOREDRESOURCEDESCRIPTORREQUEST.fields_by_name["name"]._options = None -_LISTMETRICDESCRIPTORSREQUEST.fields_by_name["name"]._options = None -_GETMETRICDESCRIPTORREQUEST.fields_by_name["name"]._options = None -_CREATEMETRICDESCRIPTORREQUEST.fields_by_name["name"]._options = None -_CREATEMETRICDESCRIPTORREQUEST.fields_by_name["metric_descriptor"]._options = None -_DELETEMETRICDESCRIPTORREQUEST.fields_by_name["name"]._options = None -_LISTTIMESERIESREQUEST.fields_by_name["name"]._options = None -_LISTTIMESERIESREQUEST.fields_by_name["filter"]._options = None -_LISTTIMESERIESREQUEST.fields_by_name["interval"]._options = None -_LISTTIMESERIESREQUEST.fields_by_name["view"]._options = None -_CREATETIMESERIESREQUEST.fields_by_name["name"]._options = None -_CREATETIMESERIESREQUEST.fields_by_name["time_series"]._options = None -_CREATETIMESERIESERROR.fields_by_name["time_series"]._options = None -_CREATETIMESERIESERROR.fields_by_name["status"]._options = None - -_METRICSERVICE = _descriptor.ServiceDescriptor( - name="MetricService", - 
full_name="google.monitoring.v3.MetricService", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\031monitoring.googleapis.com\322A\272\001https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/monitoring,https://www.googleapis.com/auth/monitoring.read,https://www.googleapis.com/auth/monitoring.write", - serialized_start=3024, - serialized_end=4750, - methods=[ - _descriptor.MethodDescriptor( - name="ListMonitoredResourceDescriptors", - full_name="google.monitoring.v3.MetricService.ListMonitoredResourceDescriptors", - index=0, - containing_service=None, - input_type=_LISTMONITOREDRESOURCEDESCRIPTORSREQUEST, - output_type=_LISTMONITOREDRESOURCEDESCRIPTORSRESPONSE, - serialized_options=b"\202\323\344\223\0024\0222/v3/{name=projects/*}/monitoredResourceDescriptors\332A\004name", - ), - _descriptor.MethodDescriptor( - name="GetMonitoredResourceDescriptor", - full_name="google.monitoring.v3.MetricService.GetMonitoredResourceDescriptor", - index=1, - containing_service=None, - input_type=_GETMONITOREDRESOURCEDESCRIPTORREQUEST, - output_type=google_dot_api_dot_monitored__resource__pb2._MONITOREDRESOURCEDESCRIPTOR, - serialized_options=b"\202\323\344\223\0027\0225/v3/{name=projects/*/monitoredResourceDescriptors/**}\332A\004name", - ), - _descriptor.MethodDescriptor( - name="ListMetricDescriptors", - full_name="google.monitoring.v3.MetricService.ListMetricDescriptors", - index=2, - containing_service=None, - input_type=_LISTMETRICDESCRIPTORSREQUEST, - output_type=_LISTMETRICDESCRIPTORSRESPONSE, - serialized_options=b"\202\323\344\223\002)\022'/v3/{name=projects/*}/metricDescriptors\332A\004name", - ), - _descriptor.MethodDescriptor( - name="GetMetricDescriptor", - full_name="google.monitoring.v3.MetricService.GetMetricDescriptor", - index=3, - containing_service=None, - input_type=_GETMETRICDESCRIPTORREQUEST, - output_type=google_dot_api_dot_metric__pb2._METRICDESCRIPTOR, - 
serialized_options=b"\202\323\344\223\002,\022*/v3/{name=projects/*/metricDescriptors/**}\332A\004name", - ), - _descriptor.MethodDescriptor( - name="CreateMetricDescriptor", - full_name="google.monitoring.v3.MetricService.CreateMetricDescriptor", - index=4, - containing_service=None, - input_type=_CREATEMETRICDESCRIPTORREQUEST, - output_type=google_dot_api_dot_metric__pb2._METRICDESCRIPTOR, - serialized_options=b"\202\323\344\223\002<\"'/v3/{name=projects/*}/metricDescriptors:\021metric_descriptor\332A\026name,metric_descriptor", - ), - _descriptor.MethodDescriptor( - name="DeleteMetricDescriptor", - full_name="google.monitoring.v3.MetricService.DeleteMetricDescriptor", - index=5, - containing_service=None, - input_type=_DELETEMETRICDESCRIPTORREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002,**/v3/{name=projects/*/metricDescriptors/**}\332A\004name", - ), - _descriptor.MethodDescriptor( - name="ListTimeSeries", - full_name="google.monitoring.v3.MetricService.ListTimeSeries", - index=6, - containing_service=None, - input_type=_LISTTIMESERIESREQUEST, - output_type=_LISTTIMESERIESRESPONSE, - serialized_options=b'\202\323\344\223\002"\022 /v3/{name=projects/*}/timeSeries\332A\031name,filter,interval,view', - ), - _descriptor.MethodDescriptor( - name="CreateTimeSeries", - full_name="google.monitoring.v3.MetricService.CreateTimeSeries", - index=7, - containing_service=None, - input_type=_CREATETIMESERIESREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b'\202\323\344\223\002%" /v3/{name=projects/*}/timeSeries:\001*\332A\020name,time_series', - ), - ], -) -_sym_db.RegisterServiceDescriptor(_METRICSERVICE) - -DESCRIPTOR.services_by_name["MetricService"] = _METRICSERVICE - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/monitoring_v3/proto/metric_service_pb2_grpc.py b/google/cloud/monitoring_v3/proto/metric_service_pb2_grpc.py deleted file mode 100644 index 
8bcfc6fc..00000000 --- a/google/cloud/monitoring_v3/proto/metric_service_pb2_grpc.py +++ /dev/null @@ -1,182 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - -from google.api import metric_pb2 as google_dot_api_dot_metric__pb2 -from google.api import ( - monitored_resource_pb2 as google_dot_api_dot_monitored__resource__pb2, -) -from google.cloud.monitoring_v3.proto import ( - metric_service_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class MetricServiceStub(object): - """Manages metric descriptors, monitored resource descriptors, and - time series data. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.ListMonitoredResourceDescriptors = channel.unary_unary( - "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListMonitoredResourceDescriptorsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListMonitoredResourceDescriptorsResponse.FromString, - ) - self.GetMonitoredResourceDescriptor = channel.unary_unary( - "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.GetMonitoredResourceDescriptorRequest.SerializeToString, - response_deserializer=google_dot_api_dot_monitored__resource__pb2.MonitoredResourceDescriptor.FromString, - ) - self.ListMetricDescriptors = channel.unary_unary( - "/google.monitoring.v3.MetricService/ListMetricDescriptors", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListMetricDescriptorsRequest.SerializeToString, - 
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListMetricDescriptorsResponse.FromString, - ) - self.GetMetricDescriptor = channel.unary_unary( - "/google.monitoring.v3.MetricService/GetMetricDescriptor", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.GetMetricDescriptorRequest.SerializeToString, - response_deserializer=google_dot_api_dot_metric__pb2.MetricDescriptor.FromString, - ) - self.CreateMetricDescriptor = channel.unary_unary( - "/google.monitoring.v3.MetricService/CreateMetricDescriptor", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.CreateMetricDescriptorRequest.SerializeToString, - response_deserializer=google_dot_api_dot_metric__pb2.MetricDescriptor.FromString, - ) - self.DeleteMetricDescriptor = channel.unary_unary( - "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.DeleteMetricDescriptorRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.ListTimeSeries = channel.unary_unary( - "/google.monitoring.v3.MetricService/ListTimeSeries", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListTimeSeriesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListTimeSeriesResponse.FromString, - ) - self.CreateTimeSeries = channel.unary_unary( - "/google.monitoring.v3.MetricService/CreateTimeSeries", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.CreateTimeSeriesRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - - -class MetricServiceServicer(object): - """Manages metric descriptors, monitored resource descriptors, and - time series data. 
- """ - - def ListMonitoredResourceDescriptors(self, request, context): - """Lists monitored resource descriptors that match a filter. This method does not require a Workspace. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetMonitoredResourceDescriptor(self, request, context): - """Gets a single monitored resource descriptor. This method does not require a Workspace. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListMetricDescriptors(self, request, context): - """Lists metric descriptors that match a filter. This method does not require a Workspace. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetMetricDescriptor(self, request, context): - """Gets a single metric descriptor. This method does not require a Workspace. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateMetricDescriptor(self, request, context): - """Creates a new metric descriptor. - User-created metric descriptors define - [custom metrics](https://cloud.google.com/monitoring/custom-metrics). - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteMetricDescriptor(self, request, context): - """Deletes a metric descriptor. Only user-created - [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be - deleted. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListTimeSeries(self, request, context): - """Lists time series that match a filter. This method does not require a Workspace. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateTimeSeries(self, request, context): - """Creates or adds data to one or more time series. - The response is empty if all time series in the request were written. - If any time series could not be written, a corresponding failure message is - included in the error response. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_MetricServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - "ListMonitoredResourceDescriptors": grpc.unary_unary_rpc_method_handler( - servicer.ListMonitoredResourceDescriptors, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListMonitoredResourceDescriptorsRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListMonitoredResourceDescriptorsResponse.SerializeToString, - ), - "GetMonitoredResourceDescriptor": grpc.unary_unary_rpc_method_handler( - servicer.GetMonitoredResourceDescriptor, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.GetMonitoredResourceDescriptorRequest.FromString, - response_serializer=google_dot_api_dot_monitored__resource__pb2.MonitoredResourceDescriptor.SerializeToString, - ), - "ListMetricDescriptors": grpc.unary_unary_rpc_method_handler( - servicer.ListMetricDescriptors, - 
request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListMetricDescriptorsRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListMetricDescriptorsResponse.SerializeToString, - ), - "GetMetricDescriptor": grpc.unary_unary_rpc_method_handler( - servicer.GetMetricDescriptor, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.GetMetricDescriptorRequest.FromString, - response_serializer=google_dot_api_dot_metric__pb2.MetricDescriptor.SerializeToString, - ), - "CreateMetricDescriptor": grpc.unary_unary_rpc_method_handler( - servicer.CreateMetricDescriptor, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.CreateMetricDescriptorRequest.FromString, - response_serializer=google_dot_api_dot_metric__pb2.MetricDescriptor.SerializeToString, - ), - "DeleteMetricDescriptor": grpc.unary_unary_rpc_method_handler( - servicer.DeleteMetricDescriptor, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.DeleteMetricDescriptorRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "ListTimeSeries": grpc.unary_unary_rpc_method_handler( - servicer.ListTimeSeries, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListTimeSeriesRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListTimeSeriesResponse.SerializeToString, - ), - "CreateTimeSeries": grpc.unary_unary_rpc_method_handler( - servicer.CreateTimeSeries, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.CreateTimeSeriesRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 
"google.monitoring.v3.MetricService", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/google/cloud/monitoring_v3/proto/mutation_record_pb2.py b/google/cloud/monitoring_v3/proto/mutation_record_pb2.py deleted file mode 100644 index 542409a4..00000000 --- a/google/cloud/monitoring_v3/proto/mutation_record_pb2.py +++ /dev/null @@ -1,112 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/monitoring_v3/proto/mutation_record.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/monitoring_v3/proto/mutation_record.proto", - package="google.monitoring.v3", - syntax="proto3", - serialized_options=b"\n\030com.google.monitoring.v3B\023MutationRecordProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3\352\002\035Google::Cloud::Monitoring::V3", - serialized_pb=b'\n6google/cloud/monitoring_v3/proto/mutation_record.proto\x12\x14google.monitoring.v3\x1a\x1fgoogle/protobuf/timestamp.proto"U\n\x0eMutationRecord\x12/\n\x0bmutate_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\nmutated_by\x18\x02 \x01(\tB\xcb\x01\n\x18\x63om.google.monitoring.v3B\x13MutationRecordProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3\xea\x02\x1dGoogle::Cloud::Monitoring::V3b\x06proto3', - dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR], -) - - 
-_MUTATIONRECORD = _descriptor.Descriptor( - name="MutationRecord", - full_name="google.monitoring.v3.MutationRecord", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="mutate_time", - full_name="google.monitoring.v3.MutationRecord.mutate_time", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="mutated_by", - full_name="google.monitoring.v3.MutationRecord.mutated_by", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=113, - serialized_end=198, -) - -_MUTATIONRECORD.fields_by_name[ - "mutate_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name["MutationRecord"] = _MUTATIONRECORD -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -MutationRecord = _reflection.GeneratedProtocolMessageType( - "MutationRecord", - (_message.Message,), - { - "DESCRIPTOR": _MUTATIONRECORD, - "__module__": "google.cloud.monitoring_v3.proto.mutation_record_pb2", - "__doc__": """Describes a change made to a configuration. - - - Attributes: - mutate_time: - When the change occurred. - mutated_by: - The email address of the user making the change. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.MutationRecord) - }, -) -_sym_db.RegisterMessage(MutationRecord) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/monitoring_v3/proto/mutation_record_pb2_grpc.py b/google/cloud/monitoring_v3/proto/mutation_record_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/monitoring_v3/proto/mutation_record_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/monitoring_v3/proto/notification_pb2.py b/google/cloud/monitoring_v3/proto/notification_pb2.py deleted file mode 100644 index 67d72489..00000000 --- a/google/cloud/monitoring_v3/proto/notification_pb2.py +++ /dev/null @@ -1,678 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/monitoring_v3/proto/notification.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import label_pb2 as google_dot_api_dot_label__pb2 -from google.api import launch_stage_pb2 as google_dot_api_dot_launch__stage__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.monitoring_v3.proto import ( - common_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2, -) -from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/monitoring_v3/proto/notification.proto", - package="google.monitoring.v3", - syntax="proto3", - 
serialized_options=b"\n\030com.google.monitoring.v3B\021NotificationProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3\352\002\035Google::Cloud::Monitoring::V3", - serialized_pb=b'\n3google/cloud/monitoring_v3/proto/notification.proto\x12\x14google.monitoring.v3\x1a\x16google/api/label.proto\x1a\x1dgoogle/api/launch_stage.proto\x1a\x19google/api/resource.proto\x1a-google/cloud/monitoring_v3/proto/common.proto\x1a\x1egoogle/protobuf/wrappers.proto"\xa5\x04\n\x1dNotificationChannelDescriptor\x12\x0c\n\x04name\x18\x06 \x01(\t\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12+\n\x06labels\x18\x04 \x03(\x0b\x32\x1b.google.api.LabelDescriptor\x12>\n\x0fsupported_tiers\x18\x05 \x03(\x0e\x32!.google.monitoring.v3.ServiceTierB\x02\x18\x01\x12-\n\x0claunch_stage\x18\x07 \x01(\x0e\x32\x17.google.api.LaunchStage:\xa0\x02\xea\x41\x9c\x02\n7monitoring.googleapis.com/NotificationChannelDescriptor\x12\x46projects/{project}/notificationChannelDescriptors/{channel_descriptor}\x12Porganizations/{organization}/notificationChannelDescriptors/{channel_descriptor}\x12\x44\x66olders/{folder}/notificationChannelDescriptors/{channel_descriptor}\x12\x01*"\xb7\x06\n\x13NotificationChannel\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x06 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\x12\x45\n\x06labels\x18\x05 \x03(\x0b\x32\x35.google.monitoring.v3.NotificationChannel.LabelsEntry\x12N\n\x0buser_labels\x18\x08 \x03(\x0b\x32\x39.google.monitoring.v3.NotificationChannel.UserLabelsEntry\x12Y\n\x13verification_status\x18\t \x01(\x0e\x32<.google.monitoring.v3.NotificationChannel.VerificationStatus\x12+\n\x07\x65nabled\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\x1a\x31\n\x0fUserLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"W\n\x12VerificationStatus\x12#\n\x1fVERIFICATION_STATUS_UNSPECIFIED\x10\x00\x12\x0e\n\nUNVERIFIED\x10\x01\x12\x0c\n\x08VERIFIED\x10\x02:\xfe\x01\xea\x41\xfa\x01\n-monitoring.googleapis.com/NotificationChannel\x12>projects/{project}/notificationChannels/{notification_channel}\x12Horganizations/{organization}/notificationChannels/{notification_channel}\x12google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3\xea\x02\x1dGoogle::Cloud::Monitoring::V3b\x06proto3', - dependencies=[ - google_dot_api_dot_label__pb2.DESCRIPTOR, - google_dot_api_dot_launch__stage__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_common__pb2.DESCRIPTOR, - google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR, - ], -) - - -_NOTIFICATIONCHANNEL_VERIFICATIONSTATUS = _descriptor.EnumDescriptor( - name="VerificationStatus", - full_name="google.monitoring.v3.NotificationChannel.VerificationStatus", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="VERIFICATION_STATUS_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="UNVERIFIED", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="VERIFIED", index=2, number=2, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1270, - serialized_end=1357, -) -_sym_db.RegisterEnumDescriptor(_NOTIFICATIONCHANNEL_VERIFICATIONSTATUS) - - -_NOTIFICATIONCHANNELDESCRIPTOR = _descriptor.Descriptor( - name="NotificationChannelDescriptor", - full_name="google.monitoring.v3.NotificationChannelDescriptor", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.NotificationChannelDescriptor.name", - index=0, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="type", - full_name="google.monitoring.v3.NotificationChannelDescriptor.type", - index=1, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.monitoring.v3.NotificationChannelDescriptor.display_name", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.monitoring.v3.NotificationChannelDescriptor.description", - index=3, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.monitoring.v3.NotificationChannelDescriptor.labels", - index=4, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - 
file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="supported_tiers", - full_name="google.monitoring.v3.NotificationChannelDescriptor.supported_tiers", - index=5, - number=5, - type=14, - cpp_type=8, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\030\001", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="launch_stage", - full_name="google.monitoring.v3.NotificationChannelDescriptor.launch_stage", - index=6, - number=7, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"\352A\234\002\n7monitoring.googleapis.com/NotificationChannelDescriptor\022Fprojects/{project}/notificationChannelDescriptors/{channel_descriptor}\022Porganizations/{organization}/notificationChannelDescriptors/{channel_descriptor}\022Dfolders/{folder}/notificationChannelDescriptors/{channel_descriptor}\022\001*", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=239, - serialized_end=788, -) - - -_NOTIFICATIONCHANNEL_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.monitoring.v3.NotificationChannel.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.monitoring.v3.NotificationChannel.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - 
_descriptor.FieldDescriptor( - name="value", - full_name="google.monitoring.v3.NotificationChannel.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1172, - serialized_end=1217, -) - -_NOTIFICATIONCHANNEL_USERLABELSENTRY = _descriptor.Descriptor( - name="UserLabelsEntry", - full_name="google.monitoring.v3.NotificationChannel.UserLabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.monitoring.v3.NotificationChannel.UserLabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.monitoring.v3.NotificationChannel.UserLabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1219, - serialized_end=1268, -) - -_NOTIFICATIONCHANNEL = _descriptor.Descriptor( - name="NotificationChannel", - 
full_name="google.monitoring.v3.NotificationChannel", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="type", - full_name="google.monitoring.v3.NotificationChannel.type", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.NotificationChannel.name", - index=1, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.monitoring.v3.NotificationChannel.display_name", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.monitoring.v3.NotificationChannel.description", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.monitoring.v3.NotificationChannel.labels", - index=4, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="user_labels", - full_name="google.monitoring.v3.NotificationChannel.user_labels", - index=5, - number=8, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="verification_status", - full_name="google.monitoring.v3.NotificationChannel.verification_status", - index=6, - number=9, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="enabled", - full_name="google.monitoring.v3.NotificationChannel.enabled", - index=7, - number=11, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[ - _NOTIFICATIONCHANNEL_LABELSENTRY, - _NOTIFICATIONCHANNEL_USERLABELSENTRY, - ], - enum_types=[_NOTIFICATIONCHANNEL_VERIFICATIONSTATUS], - serialized_options=b"\352A\372\001\n-monitoring.googleapis.com/NotificationChannel\022>projects/{project}/notificationChannels/{notification_channel}\022Horganizations/{organization}/notificationChannels/{notification_channel}\022google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3\352\002\035Google::Cloud::Monitoring::V3", - 
serialized_pb=b'\n;google/cloud/monitoring_v3/proto/notification_service.proto\x12\x14google.monitoring.v3\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x33google/cloud/monitoring_v3/proto/notification.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xa1\x01\n)ListNotificationChannelDescriptorsRequest\x12M\n\x04name\x18\x04 \x01(\tB?\xe0\x41\x02\xfa\x41\x39\x12\x37monitoring.googleapis.com/NotificationChannelDescriptor\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"\x97\x01\n*ListNotificationChannelDescriptorsResponse\x12P\n\x13\x63hannel_descriptors\x18\x01 \x03(\x0b\x32\x33.google.monitoring.v3.NotificationChannelDescriptor\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"x\n\'GetNotificationChannelDescriptorRequest\x12M\n\x04name\x18\x03 \x01(\tB?\xe0\x41\x02\xfa\x41\x39\n7monitoring.googleapis.com/NotificationChannelDescriptor"\xb5\x01\n CreateNotificationChannelRequest\x12\x43\n\x04name\x18\x03 \x01(\tB5\xe0\x41\x02\xfa\x41/\x12-monitoring.googleapis.com/NotificationChannel\x12L\n\x14notification_channel\x18\x02 \x01(\x0b\x32).google.monitoring.v3.NotificationChannelB\x03\xe0\x41\x02"\xaf\x01\n\x1fListNotificationChannelsRequest\x12\x43\n\x04name\x18\x05 \x01(\tB5\xe0\x41\x02\xfa\x41/\x12-monitoring.googleapis.com/NotificationChannel\x12\x0e\n\x06\x66ilter\x18\x06 \x01(\t\x12\x10\n\x08order_by\x18\x07 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t"\x85\x01\n ListNotificationChannelsResponse\x12H\n\x15notification_channels\x18\x03 \x03(\x0b\x32).google.monitoring.v3.NotificationChannel\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"d\n\x1dGetNotificationChannelRequest\x12\x43\n\x04name\x18\x03 \x01(\tB5\xe0\x41\x02\xfa\x41/\n-monitoring.googleapis.com/NotificationChannel"\xa1\x01\n 
UpdateNotificationChannelRequest\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12L\n\x14notification_channel\x18\x03 \x01(\x0b\x32).google.monitoring.v3.NotificationChannelB\x03\xe0\x41\x02"v\n DeleteNotificationChannelRequest\x12\x43\n\x04name\x18\x03 \x01(\tB5\xe0\x41\x02\xfa\x41/\n-monitoring.googleapis.com/NotificationChannel\x12\r\n\x05\x66orce\x18\x05 \x01(\x08"u\n.SendNotificationChannelVerificationCodeRequest\x12\x43\n\x04name\x18\x01 \x01(\tB5\xe0\x41\x02\xfa\x41/\n-monitoring.googleapis.com/NotificationChannel"\xa5\x01\n-GetNotificationChannelVerificationCodeRequest\x12\x43\n\x04name\x18\x01 \x01(\tB5\xe0\x41\x02\xfa\x41/\n-monitoring.googleapis.com/NotificationChannel\x12/\n\x0b\x65xpire_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"o\n.GetNotificationChannelVerificationCodeResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\t\x12/\n\x0b\x65xpire_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"z\n VerifyNotificationChannelRequest\x12\x43\n\x04name\x18\x01 \x01(\tB5\xe0\x41\x02\xfa\x41/\n-monitoring.googleapis.com/NotificationChannel\x12\x11\n\x04\x63ode\x18\x02 \x01(\tB\x03\xe0\x41\x02\x32\xea\x12\n\x1aNotificationChannelService\x12\xec\x01\n"ListNotificationChannelDescriptors\x12?.google.monitoring.v3.ListNotificationChannelDescriptorsRequest\x1a@.google.monitoring.v3.ListNotificationChannelDescriptorsResponse"C\x82\xd3\xe4\x93\x02\x36\x12\x34/v3/{name=projects/*}/notificationChannelDescriptors\xda\x41\x04name\x12\xdd\x01\n 
GetNotificationChannelDescriptor\x12=.google.monitoring.v3.GetNotificationChannelDescriptorRequest\x1a\x33.google.monitoring.v3.NotificationChannelDescriptor"E\x82\xd3\xe4\x93\x02\x38\x12\x36/v3/{name=projects/*/notificationChannelDescriptors/*}\xda\x41\x04name\x12\xc4\x01\n\x18ListNotificationChannels\x12\x35.google.monitoring.v3.ListNotificationChannelsRequest\x1a\x36.google.monitoring.v3.ListNotificationChannelsResponse"9\x82\xd3\xe4\x93\x02,\x12*/v3/{name=projects/*}/notificationChannels\xda\x41\x04name\x12\xb5\x01\n\x16GetNotificationChannel\x12\x33.google.monitoring.v3.GetNotificationChannelRequest\x1a).google.monitoring.v3.NotificationChannel";\x82\xd3\xe4\x93\x02.\x12,/v3/{name=projects/*/notificationChannels/*}\xda\x41\x04name\x12\xe4\x01\n\x19\x43reateNotificationChannel\x12\x36.google.monitoring.v3.CreateNotificationChannelRequest\x1a).google.monitoring.v3.NotificationChannel"d\x82\xd3\xe4\x93\x02\x42"*/v3/{name=projects/*}/notificationChannels:\x14notification_channel\xda\x41\x19name,notification_channel\x12\x83\x02\n\x19UpdateNotificationChannel\x12\x36.google.monitoring.v3.UpdateNotificationChannelRequest\x1a).google.monitoring.v3.NotificationChannel"\x82\x01\x82\xd3\xe4\x93\x02Y2A/v3/{notification_channel.name=projects/*/notificationChannels/*}:\x14notification_channel\xda\x41 
update_mask,notification_channel\x12\xae\x01\n\x19\x44\x65leteNotificationChannel\x12\x36.google.monitoring.v3.DeleteNotificationChannelRequest\x1a\x16.google.protobuf.Empty"A\x82\xd3\xe4\x93\x02.*,/v3/{name=projects/*/notificationChannels/*}\xda\x41\nname,force\x12\xdc\x01\n\'SendNotificationChannelVerificationCode\x12\x44.google.monitoring.v3.SendNotificationChannelVerificationCodeRequest\x1a\x16.google.protobuf.Empty"S\x82\xd3\xe4\x93\x02\x46"A/v3/{name=projects/*/notificationChannels/*}:sendVerificationCode:\x01*\xda\x41\x04name\x12\x87\x02\n&GetNotificationChannelVerificationCode\x12\x43.google.monitoring.v3.GetNotificationChannelVerificationCodeRequest\x1a\x44.google.monitoring.v3.GetNotificationChannelVerificationCodeResponse"R\x82\xd3\xe4\x93\x02\x45"@/v3/{name=projects/*/notificationChannels/*}:getVerificationCode:\x01*\xda\x41\x04name\x12\xca\x01\n\x19VerifyNotificationChannel\x12\x36.google.monitoring.v3.VerifyNotificationChannelRequest\x1a).google.monitoring.v3.NotificationChannel"J\x82\xd3\xe4\x93\x02\x38"3/v3/{name=projects/*/notificationChannels/*}:verify:\x01*\xda\x41\tname,code\x1a\xa9\x01\xca\x41\x19monitoring.googleapis.com\xd2\x41\x89\x01https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/monitoring,https://www.googleapis.com/auth/monitoring.readB\xd0\x01\n\x18\x63om.google.monitoring.v3B\x18NotificationServiceProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3\xea\x02\x1dGoogle::Cloud::Monitoring::V3b\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - 
google_dot_protobuf_dot_struct__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_LISTNOTIFICATIONCHANNELDESCRIPTORSREQUEST = _descriptor.Descriptor( - name="ListNotificationChannelDescriptorsRequest", - full_name="google.monitoring.v3.ListNotificationChannelDescriptorsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.ListNotificationChannelDescriptorsRequest.name", - index=0, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A9\0227monitoring.googleapis.com/NotificationChannelDescriptor", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.monitoring.v3.ListNotificationChannelDescriptorsRequest.page_size", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.monitoring.v3.ListNotificationChannelDescriptorsRequest.page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=380, - serialized_end=541, -) - - -_LISTNOTIFICATIONCHANNELDESCRIPTORSRESPONSE = _descriptor.Descriptor( - 
name="ListNotificationChannelDescriptorsResponse", - full_name="google.monitoring.v3.ListNotificationChannelDescriptorsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="channel_descriptors", - full_name="google.monitoring.v3.ListNotificationChannelDescriptorsResponse.channel_descriptors", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.monitoring.v3.ListNotificationChannelDescriptorsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=544, - serialized_end=695, -) - - -_GETNOTIFICATIONCHANNELDESCRIPTORREQUEST = _descriptor.Descriptor( - name="GetNotificationChannelDescriptorRequest", - full_name="google.monitoring.v3.GetNotificationChannelDescriptorRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.GetNotificationChannelDescriptorRequest.name", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A9\n7monitoring.googleapis.com/NotificationChannelDescriptor", 
- file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=697, - serialized_end=817, -) - - -_CREATENOTIFICATIONCHANNELREQUEST = _descriptor.Descriptor( - name="CreateNotificationChannelRequest", - full_name="google.monitoring.v3.CreateNotificationChannelRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.CreateNotificationChannelRequest.name", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A/\022-monitoring.googleapis.com/NotificationChannel", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="notification_channel", - full_name="google.monitoring.v3.CreateNotificationChannelRequest.notification_channel", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=820, - serialized_end=1001, -) - - -_LISTNOTIFICATIONCHANNELSREQUEST = _descriptor.Descriptor( - name="ListNotificationChannelsRequest", - full_name="google.monitoring.v3.ListNotificationChannelsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.ListNotificationChannelsRequest.name", - index=0, - number=5, - type=9, - 
cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A/\022-monitoring.googleapis.com/NotificationChannel", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.monitoring.v3.ListNotificationChannelsRequest.filter", - index=1, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="order_by", - full_name="google.monitoring.v3.ListNotificationChannelsRequest.order_by", - index=2, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.monitoring.v3.ListNotificationChannelsRequest.page_size", - index=3, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.monitoring.v3.ListNotificationChannelsRequest.page_token", - index=4, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - 
serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1004, - serialized_end=1179, -) - - -_LISTNOTIFICATIONCHANNELSRESPONSE = _descriptor.Descriptor( - name="ListNotificationChannelsResponse", - full_name="google.monitoring.v3.ListNotificationChannelsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="notification_channels", - full_name="google.monitoring.v3.ListNotificationChannelsResponse.notification_channels", - index=0, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.monitoring.v3.ListNotificationChannelsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1182, - serialized_end=1315, -) - - -_GETNOTIFICATIONCHANNELREQUEST = _descriptor.Descriptor( - name="GetNotificationChannelRequest", - full_name="google.monitoring.v3.GetNotificationChannelRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.GetNotificationChannelRequest.name", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A/\n-monitoring.googleapis.com/NotificationChannel", - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1317, - serialized_end=1417, -) - - -_UPDATENOTIFICATIONCHANNELREQUEST = _descriptor.Descriptor( - name="UpdateNotificationChannelRequest", - full_name="google.monitoring.v3.UpdateNotificationChannelRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.monitoring.v3.UpdateNotificationChannelRequest.update_mask", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="notification_channel", - full_name="google.monitoring.v3.UpdateNotificationChannelRequest.notification_channel", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1420, - serialized_end=1581, -) - - -_DELETENOTIFICATIONCHANNELREQUEST = _descriptor.Descriptor( - name="DeleteNotificationChannelRequest", - full_name="google.monitoring.v3.DeleteNotificationChannelRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - 
full_name="google.monitoring.v3.DeleteNotificationChannelRequest.name", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A/\n-monitoring.googleapis.com/NotificationChannel", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="force", - full_name="google.monitoring.v3.DeleteNotificationChannelRequest.force", - index=1, - number=5, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1583, - serialized_end=1701, -) - - -_SENDNOTIFICATIONCHANNELVERIFICATIONCODEREQUEST = _descriptor.Descriptor( - name="SendNotificationChannelVerificationCodeRequest", - full_name="google.monitoring.v3.SendNotificationChannelVerificationCodeRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.SendNotificationChannelVerificationCodeRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A/\n-monitoring.googleapis.com/NotificationChannel", - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1703, - serialized_end=1820, 
-) - - -_GETNOTIFICATIONCHANNELVERIFICATIONCODEREQUEST = _descriptor.Descriptor( - name="GetNotificationChannelVerificationCodeRequest", - full_name="google.monitoring.v3.GetNotificationChannelVerificationCodeRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.GetNotificationChannelVerificationCodeRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A/\n-monitoring.googleapis.com/NotificationChannel", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="expire_time", - full_name="google.monitoring.v3.GetNotificationChannelVerificationCodeRequest.expire_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1823, - serialized_end=1988, -) - - -_GETNOTIFICATIONCHANNELVERIFICATIONCODERESPONSE = _descriptor.Descriptor( - name="GetNotificationChannelVerificationCodeResponse", - full_name="google.monitoring.v3.GetNotificationChannelVerificationCodeResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="code", - full_name="google.monitoring.v3.GetNotificationChannelVerificationCodeResponse.code", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="expire_time", - full_name="google.monitoring.v3.GetNotificationChannelVerificationCodeResponse.expire_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1990, - serialized_end=2101, -) - - -_VERIFYNOTIFICATIONCHANNELREQUEST = _descriptor.Descriptor( - name="VerifyNotificationChannelRequest", - full_name="google.monitoring.v3.VerifyNotificationChannelRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.VerifyNotificationChannelRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A/\n-monitoring.googleapis.com/NotificationChannel", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="code", - full_name="google.monitoring.v3.VerifyNotificationChannelRequest.code", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", 
- extension_ranges=[], - oneofs=[], - serialized_start=2103, - serialized_end=2225, -) - -_LISTNOTIFICATIONCHANNELDESCRIPTORSRESPONSE.fields_by_name[ - "channel_descriptors" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2._NOTIFICATIONCHANNELDESCRIPTOR -) -_CREATENOTIFICATIONCHANNELREQUEST.fields_by_name[ - "notification_channel" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2._NOTIFICATIONCHANNEL -) -_LISTNOTIFICATIONCHANNELSRESPONSE.fields_by_name[ - "notification_channels" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2._NOTIFICATIONCHANNEL -) -_UPDATENOTIFICATIONCHANNELREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_UPDATENOTIFICATIONCHANNELREQUEST.fields_by_name[ - "notification_channel" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2._NOTIFICATIONCHANNEL -) -_GETNOTIFICATIONCHANNELVERIFICATIONCODEREQUEST.fields_by_name[ - "expire_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_GETNOTIFICATIONCHANNELVERIFICATIONCODERESPONSE.fields_by_name[ - "expire_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name[ - "ListNotificationChannelDescriptorsRequest" -] = _LISTNOTIFICATIONCHANNELDESCRIPTORSREQUEST -DESCRIPTOR.message_types_by_name[ - "ListNotificationChannelDescriptorsResponse" -] = _LISTNOTIFICATIONCHANNELDESCRIPTORSRESPONSE -DESCRIPTOR.message_types_by_name[ - "GetNotificationChannelDescriptorRequest" -] = _GETNOTIFICATIONCHANNELDESCRIPTORREQUEST -DESCRIPTOR.message_types_by_name[ - "CreateNotificationChannelRequest" -] = _CREATENOTIFICATIONCHANNELREQUEST -DESCRIPTOR.message_types_by_name[ - "ListNotificationChannelsRequest" -] = _LISTNOTIFICATIONCHANNELSREQUEST -DESCRIPTOR.message_types_by_name[ - "ListNotificationChannelsResponse" -] = 
_LISTNOTIFICATIONCHANNELSRESPONSE -DESCRIPTOR.message_types_by_name[ - "GetNotificationChannelRequest" -] = _GETNOTIFICATIONCHANNELREQUEST -DESCRIPTOR.message_types_by_name[ - "UpdateNotificationChannelRequest" -] = _UPDATENOTIFICATIONCHANNELREQUEST -DESCRIPTOR.message_types_by_name[ - "DeleteNotificationChannelRequest" -] = _DELETENOTIFICATIONCHANNELREQUEST -DESCRIPTOR.message_types_by_name[ - "SendNotificationChannelVerificationCodeRequest" -] = _SENDNOTIFICATIONCHANNELVERIFICATIONCODEREQUEST -DESCRIPTOR.message_types_by_name[ - "GetNotificationChannelVerificationCodeRequest" -] = _GETNOTIFICATIONCHANNELVERIFICATIONCODEREQUEST -DESCRIPTOR.message_types_by_name[ - "GetNotificationChannelVerificationCodeResponse" -] = _GETNOTIFICATIONCHANNELVERIFICATIONCODERESPONSE -DESCRIPTOR.message_types_by_name[ - "VerifyNotificationChannelRequest" -] = _VERIFYNOTIFICATIONCHANNELREQUEST -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ListNotificationChannelDescriptorsRequest = _reflection.GeneratedProtocolMessageType( - "ListNotificationChannelDescriptorsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTNOTIFICATIONCHANNELDESCRIPTORSREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.notification_service_pb2", - "__doc__": """The ``ListNotificationChannelDescriptors`` request. - - - Attributes: - name: - Required. The REST resource name of the parent from which to - retrieve the notification channel descriptors. The expected - syntax is: :: projects/[PROJECT_ID_OR_NUMBER] Note that - this names the parent container in which to look for the - descriptors; to retrieve a single descriptor by name, use the - [GetNotificationChannelDescriptor][google.monitoring.v3.Notifi - cationChannelService.GetNotificationChannelDescriptor] - operation, instead. - page_size: - The maximum number of results to return in a single response. - If not set to a positive number, a reasonable value will be - chosen by the service. 
- page_token: - If non-empty, ``page_token`` must contain a value returned as - the ``next_page_token`` in a previous response to request the - next set of results. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListNotificationChannelDescriptorsRequest) - }, -) -_sym_db.RegisterMessage(ListNotificationChannelDescriptorsRequest) - -ListNotificationChannelDescriptorsResponse = _reflection.GeneratedProtocolMessageType( - "ListNotificationChannelDescriptorsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTNOTIFICATIONCHANNELDESCRIPTORSRESPONSE, - "__module__": "google.cloud.monitoring_v3.proto.notification_service_pb2", - "__doc__": """The ``ListNotificationChannelDescriptors`` response. - - - Attributes: - channel_descriptors: - The monitored resource descriptors supported for the specified - project, optionally filtered. - next_page_token: - If not empty, indicates that there may be more results that - match the request. Use the value in the ``page_token`` field - in a subsequent request to fetch the next set of results. If - empty, all results have been returned. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListNotificationChannelDescriptorsResponse) - }, -) -_sym_db.RegisterMessage(ListNotificationChannelDescriptorsResponse) - -GetNotificationChannelDescriptorRequest = _reflection.GeneratedProtocolMessageType( - "GetNotificationChannelDescriptorRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETNOTIFICATIONCHANNELDESCRIPTORREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.notification_service_pb2", - "__doc__": """The ``GetNotificationChannelDescriptor`` response. - - - Attributes: - name: - Required. The channel type for which to execute the request. 
- The format is: :: projects/[PROJECT_ID_OR_NUMBER]/notific - ationChannelDescriptors/[CHANNEL_TYPE] - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.GetNotificationChannelDescriptorRequest) - }, -) -_sym_db.RegisterMessage(GetNotificationChannelDescriptorRequest) - -CreateNotificationChannelRequest = _reflection.GeneratedProtocolMessageType( - "CreateNotificationChannelRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATENOTIFICATIONCHANNELREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.notification_service_pb2", - "__doc__": """The ``CreateNotificationChannel`` request. - - - Attributes: - name: - Required. The project on which to execute the request. The - format is: :: projects/[PROJECT_ID_OR_NUMBER] This names - the container into which the channel will be written, this - does not name the newly created channel. The resulting - channel’s name will have a normalized version of this field as - a prefix, but will add ``/notificationChannels/[CHANNEL_ID]`` - to identify the channel. - notification_channel: - Required. The definition of the ``NotificationChannel`` to - create. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.CreateNotificationChannelRequest) - }, -) -_sym_db.RegisterMessage(CreateNotificationChannelRequest) - -ListNotificationChannelsRequest = _reflection.GeneratedProtocolMessageType( - "ListNotificationChannelsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTNOTIFICATIONCHANNELSREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.notification_service_pb2", - "__doc__": """The ``ListNotificationChannels`` request. - - - Attributes: - name: - Required. The project on which to execute the request. The - format is: :: projects/[PROJECT_ID_OR_NUMBER] This names - the container in which to look for the notification channels; - it does not name a specific channel. 
To query a specific - channel by REST resource name, use the [``GetNotificationChann - el``][google.monitoring.v3.NotificationChannelService.GetNotif - icationChannel] operation. - filter: - If provided, this field specifies the criteria that must be - met by notification channels to be included in the response. - For more details, see `sorting and filtering - `__. - order_by: - A comma-separated list of fields by which to sort the result. - Supports the same set of fields as in ``filter``. Entries can - be prefixed with a minus sign to sort in descending rather - than ascending order. For more details, see `sorting and - filtering `__. - page_size: - The maximum number of results to return in a single response. - If not set to a positive number, a reasonable value will be - chosen by the service. - page_token: - If non-empty, ``page_token`` must contain a value returned as - the ``next_page_token`` in a previous response to request the - next set of results. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListNotificationChannelsRequest) - }, -) -_sym_db.RegisterMessage(ListNotificationChannelsRequest) - -ListNotificationChannelsResponse = _reflection.GeneratedProtocolMessageType( - "ListNotificationChannelsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTNOTIFICATIONCHANNELSRESPONSE, - "__module__": "google.cloud.monitoring_v3.proto.notification_service_pb2", - "__doc__": """The ``ListNotificationChannels`` response. - - - Attributes: - notification_channels: - The notification channels defined for the specified project. - next_page_token: - If not empty, indicates that there may be more results that - match the request. Use the value in the ``page_token`` field - in a subsequent request to fetch the next set of results. If - empty, all results have been returned. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListNotificationChannelsResponse) - }, -) -_sym_db.RegisterMessage(ListNotificationChannelsResponse) - -GetNotificationChannelRequest = _reflection.GeneratedProtocolMessageType( - "GetNotificationChannelRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETNOTIFICATIONCHANNELREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.notification_service_pb2", - "__doc__": """The ``GetNotificationChannel`` request. - - - Attributes: - name: - Required. The channel for which to execute the request. The - format is: :: projects/[PROJECT_ID_OR_NUMBER]/notificatio - nChannels/[CHANNEL_ID] - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.GetNotificationChannelRequest) - }, -) -_sym_db.RegisterMessage(GetNotificationChannelRequest) - -UpdateNotificationChannelRequest = _reflection.GeneratedProtocolMessageType( - "UpdateNotificationChannelRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATENOTIFICATIONCHANNELREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.notification_service_pb2", - "__doc__": """The ``UpdateNotificationChannel`` request. - - - Attributes: - update_mask: - The fields to update. - notification_channel: - Required. A description of the changes to be applied to the - specified notification channel. The description must provide a - definition for fields to be updated; the names of these fields - should also be included in the ``update_mask``. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.UpdateNotificationChannelRequest) - }, -) -_sym_db.RegisterMessage(UpdateNotificationChannelRequest) - -DeleteNotificationChannelRequest = _reflection.GeneratedProtocolMessageType( - "DeleteNotificationChannelRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETENOTIFICATIONCHANNELREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.notification_service_pb2", - "__doc__": """The ``DeleteNotificationChannel`` request. 
- - - Attributes: - name: - Required. The channel for which to execute the request. The - format is: :: projects/[PROJECT_ID_OR_NUMBER]/notificatio - nChannels/[CHANNEL_ID] - force: - If true, the notification channel will be deleted regardless - of its use in alert policies (the policies will be updated to - remove the channel). If false, channels that are still - referenced by an existing alerting policy will fail to be - deleted in a delete operation. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.DeleteNotificationChannelRequest) - }, -) -_sym_db.RegisterMessage(DeleteNotificationChannelRequest) - -SendNotificationChannelVerificationCodeRequest = _reflection.GeneratedProtocolMessageType( - "SendNotificationChannelVerificationCodeRequest", - (_message.Message,), - { - "DESCRIPTOR": _SENDNOTIFICATIONCHANNELVERIFICATIONCODEREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.notification_service_pb2", - "__doc__": """The ``SendNotificationChannelVerificationCode`` request. - - - Attributes: - name: - Required. The notification channel to which to send a - verification code. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.SendNotificationChannelVerificationCodeRequest) - }, -) -_sym_db.RegisterMessage(SendNotificationChannelVerificationCodeRequest) - -GetNotificationChannelVerificationCodeRequest = _reflection.GeneratedProtocolMessageType( - "GetNotificationChannelVerificationCodeRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETNOTIFICATIONCHANNELVERIFICATIONCODEREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.notification_service_pb2", - "__doc__": """The ``GetNotificationChannelVerificationCode`` request. - - - Attributes: - name: - Required. The notification channel for which a verification - code is to be generated and retrieved. This must name a - channel that is already verified; if the specified channel is - not verified, the request will fail. - expire_time: - The desired expiration time. 
If specified, the API will - guarantee that the returned code will not be valid after the - specified timestamp; however, the API cannot guarantee that - the returned code will be valid for at least as long as the - requested time (the API puts an upper bound on the amount of - time for which a code may be valid). If omitted, a default - expiration will be used, which may be less than the max - permissible expiration (so specifying an expiration may extend - the code’s lifetime over omitting an expiration, even though - the API does impose an upper limit on the maximum expiration - that is permitted). - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.GetNotificationChannelVerificationCodeRequest) - }, -) -_sym_db.RegisterMessage(GetNotificationChannelVerificationCodeRequest) - -GetNotificationChannelVerificationCodeResponse = _reflection.GeneratedProtocolMessageType( - "GetNotificationChannelVerificationCodeResponse", - (_message.Message,), - { - "DESCRIPTOR": _GETNOTIFICATIONCHANNELVERIFICATIONCODERESPONSE, - "__module__": "google.cloud.monitoring_v3.proto.notification_service_pb2", - "__doc__": """The ``GetNotificationChannelVerificationCode`` request. - - - Attributes: - code: - The verification code, which may be used to verify other - channels that have an equivalent identity (i.e. other channels - of the same type with the same fingerprint such as other email - channels with the same email address or other sms channels - with the same number). - expire_time: - The expiration time associated with the code that was - returned. If an expiration was provided in the request, this - is the minimum of the requested expiration in the request and - the max permitted expiration. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.GetNotificationChannelVerificationCodeResponse) - }, -) -_sym_db.RegisterMessage(GetNotificationChannelVerificationCodeResponse) - -VerifyNotificationChannelRequest = _reflection.GeneratedProtocolMessageType( - "VerifyNotificationChannelRequest", - (_message.Message,), - { - "DESCRIPTOR": _VERIFYNOTIFICATIONCHANNELREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.notification_service_pb2", - "__doc__": """The ``VerifyNotificationChannel`` request. - - - Attributes: - name: - Required. The notification channel to verify. - code: - Required. The verification code that was delivered to the - channel as a result of invoking the - ``SendNotificationChannelVerificationCode`` API method or that - was retrieved from a verified channel via - ``GetNotificationChannelVerificationCode``. For example, one - might have “G-123456” or “TKNZGhhd2EyN3I1MnRnMjRv” (in - general, one is only guaranteed that the code is valid UTF-8; - one should not make any assumptions regarding the structure or - format of the code). 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.VerifyNotificationChannelRequest) - }, -) -_sym_db.RegisterMessage(VerifyNotificationChannelRequest) - - -DESCRIPTOR._options = None -_LISTNOTIFICATIONCHANNELDESCRIPTORSREQUEST.fields_by_name["name"]._options = None -_GETNOTIFICATIONCHANNELDESCRIPTORREQUEST.fields_by_name["name"]._options = None -_CREATENOTIFICATIONCHANNELREQUEST.fields_by_name["name"]._options = None -_CREATENOTIFICATIONCHANNELREQUEST.fields_by_name["notification_channel"]._options = None -_LISTNOTIFICATIONCHANNELSREQUEST.fields_by_name["name"]._options = None -_GETNOTIFICATIONCHANNELREQUEST.fields_by_name["name"]._options = None -_UPDATENOTIFICATIONCHANNELREQUEST.fields_by_name["notification_channel"]._options = None -_DELETENOTIFICATIONCHANNELREQUEST.fields_by_name["name"]._options = None -_SENDNOTIFICATIONCHANNELVERIFICATIONCODEREQUEST.fields_by_name["name"]._options = None -_GETNOTIFICATIONCHANNELVERIFICATIONCODEREQUEST.fields_by_name["name"]._options = None -_VERIFYNOTIFICATIONCHANNELREQUEST.fields_by_name["name"]._options = None -_VERIFYNOTIFICATIONCHANNELREQUEST.fields_by_name["code"]._options = None - -_NOTIFICATIONCHANNELSERVICE = _descriptor.ServiceDescriptor( - name="NotificationChannelService", - full_name="google.monitoring.v3.NotificationChannelService", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\031monitoring.googleapis.com\322A\211\001https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/monitoring,https://www.googleapis.com/auth/monitoring.read", - serialized_start=2228, - serialized_end=4638, - methods=[ - _descriptor.MethodDescriptor( - name="ListNotificationChannelDescriptors", - full_name="google.monitoring.v3.NotificationChannelService.ListNotificationChannelDescriptors", - index=0, - containing_service=None, - input_type=_LISTNOTIFICATIONCHANNELDESCRIPTORSREQUEST, - output_type=_LISTNOTIFICATIONCHANNELDESCRIPTORSRESPONSE, - 
serialized_options=b"\202\323\344\223\0026\0224/v3/{name=projects/*}/notificationChannelDescriptors\332A\004name", - ), - _descriptor.MethodDescriptor( - name="GetNotificationChannelDescriptor", - full_name="google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor", - index=1, - containing_service=None, - input_type=_GETNOTIFICATIONCHANNELDESCRIPTORREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2._NOTIFICATIONCHANNELDESCRIPTOR, - serialized_options=b"\202\323\344\223\0028\0226/v3/{name=projects/*/notificationChannelDescriptors/*}\332A\004name", - ), - _descriptor.MethodDescriptor( - name="ListNotificationChannels", - full_name="google.monitoring.v3.NotificationChannelService.ListNotificationChannels", - index=2, - containing_service=None, - input_type=_LISTNOTIFICATIONCHANNELSREQUEST, - output_type=_LISTNOTIFICATIONCHANNELSRESPONSE, - serialized_options=b"\202\323\344\223\002,\022*/v3/{name=projects/*}/notificationChannels\332A\004name", - ), - _descriptor.MethodDescriptor( - name="GetNotificationChannel", - full_name="google.monitoring.v3.NotificationChannelService.GetNotificationChannel", - index=3, - containing_service=None, - input_type=_GETNOTIFICATIONCHANNELREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2._NOTIFICATIONCHANNEL, - serialized_options=b"\202\323\344\223\002.\022,/v3/{name=projects/*/notificationChannels/*}\332A\004name", - ), - _descriptor.MethodDescriptor( - name="CreateNotificationChannel", - full_name="google.monitoring.v3.NotificationChannelService.CreateNotificationChannel", - index=4, - containing_service=None, - input_type=_CREATENOTIFICATIONCHANNELREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2._NOTIFICATIONCHANNEL, - serialized_options=b'\202\323\344\223\002B"*/v3/{name=projects/*}/notificationChannels:\024notification_channel\332A\031name,notification_channel', - ), - 
_descriptor.MethodDescriptor( - name="UpdateNotificationChannel", - full_name="google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel", - index=5, - containing_service=None, - input_type=_UPDATENOTIFICATIONCHANNELREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2._NOTIFICATIONCHANNEL, - serialized_options=b"\202\323\344\223\002Y2A/v3/{notification_channel.name=projects/*/notificationChannels/*}:\024notification_channel\332A update_mask,notification_channel", - ), - _descriptor.MethodDescriptor( - name="DeleteNotificationChannel", - full_name="google.monitoring.v3.NotificationChannelService.DeleteNotificationChannel", - index=6, - containing_service=None, - input_type=_DELETENOTIFICATIONCHANNELREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002.*,/v3/{name=projects/*/notificationChannels/*}\332A\nname,force", - ), - _descriptor.MethodDescriptor( - name="SendNotificationChannelVerificationCode", - full_name="google.monitoring.v3.NotificationChannelService.SendNotificationChannelVerificationCode", - index=7, - containing_service=None, - input_type=_SENDNOTIFICATIONCHANNELVERIFICATIONCODEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b'\202\323\344\223\002F"A/v3/{name=projects/*/notificationChannels/*}:sendVerificationCode:\001*\332A\004name', - ), - _descriptor.MethodDescriptor( - name="GetNotificationChannelVerificationCode", - full_name="google.monitoring.v3.NotificationChannelService.GetNotificationChannelVerificationCode", - index=8, - containing_service=None, - input_type=_GETNOTIFICATIONCHANNELVERIFICATIONCODEREQUEST, - output_type=_GETNOTIFICATIONCHANNELVERIFICATIONCODERESPONSE, - serialized_options=b'\202\323\344\223\002E"@/v3/{name=projects/*/notificationChannels/*}:getVerificationCode:\001*\332A\004name', - ), - _descriptor.MethodDescriptor( - name="VerifyNotificationChannel", - 
full_name="google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel", - index=9, - containing_service=None, - input_type=_VERIFYNOTIFICATIONCHANNELREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2._NOTIFICATIONCHANNEL, - serialized_options=b'\202\323\344\223\0028"3/v3/{name=projects/*/notificationChannels/*}:verify:\001*\332A\tname,code', - ), - ], -) -_sym_db.RegisterServiceDescriptor(_NOTIFICATIONCHANNELSERVICE) - -DESCRIPTOR.services_by_name["NotificationChannelService"] = _NOTIFICATIONCHANNELSERVICE - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/monitoring_v3/proto/notification_service_pb2_grpc.py b/google/cloud/monitoring_v3/proto/notification_service_pb2_grpc.py deleted file mode 100644 index ba8b68d0..00000000 --- a/google/cloud/monitoring_v3/proto/notification_service_pb2_grpc.py +++ /dev/null @@ -1,239 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - -from google.cloud.monitoring_v3.proto import ( - notification_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2, -) -from google.cloud.monitoring_v3.proto import ( - notification_service_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class NotificationChannelServiceStub(object): - """The Notification Channel API provides access to configuration that - controls how messages related to incidents are sent. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.ListNotificationChannelDescriptors = channel.unary_unary( - "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelDescriptorsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelDescriptorsResponse.FromString, - ) - self.GetNotificationChannelDescriptor = channel.unary_unary( - "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelDescriptorRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannelDescriptor.FromString, - ) - self.ListNotificationChannels = channel.unary_unary( - "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelsResponse.FromString, - ) - self.GetNotificationChannel = channel.unary_unary( - "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.FromString, - ) - self.CreateNotificationChannel = channel.unary_unary( - "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", - 
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.CreateNotificationChannelRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.FromString, - ) - self.UpdateNotificationChannel = channel.unary_unary( - "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.UpdateNotificationChannelRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.FromString, - ) - self.DeleteNotificationChannel = channel.unary_unary( - "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.DeleteNotificationChannelRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.SendNotificationChannelVerificationCode = channel.unary_unary( - "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.SendNotificationChannelVerificationCodeRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.GetNotificationChannelVerificationCode = channel.unary_unary( - "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelVerificationCodeRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelVerificationCodeResponse.FromString, - ) - self.VerifyNotificationChannel = 
channel.unary_unary( - "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.VerifyNotificationChannelRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.FromString, - ) - - -class NotificationChannelServiceServicer(object): - """The Notification Channel API provides access to configuration that - controls how messages related to incidents are sent. - """ - - def ListNotificationChannelDescriptors(self, request, context): - """Lists the descriptors for supported channel types. The use of descriptors - makes it possible for new channel types to be dynamically added. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetNotificationChannelDescriptor(self, request, context): - """Gets a single channel descriptor. The descriptor indicates which fields - are expected / permitted for a notification channel of the given type. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListNotificationChannels(self, request, context): - """Lists the notification channels that have been created for the project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetNotificationChannel(self, request, context): - """Gets a single notification channel. The channel includes the relevant - configuration details with which the channel was created. 
However, the - response may truncate or omit passwords, API keys, or other private key - matter and thus the response may not be 100% identical to the information - that was supplied in the call to the create method. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateNotificationChannel(self, request, context): - """Creates a new notification channel, representing a single notification - endpoint such as an email address, SMS number, or PagerDuty service. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateNotificationChannel(self, request, context): - """Updates a notification channel. Fields not specified in the field mask - remain unchanged. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteNotificationChannel(self, request, context): - """Deletes a notification channel. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SendNotificationChannelVerificationCode(self, request, context): - """Causes a verification code to be delivered to the channel. The code - can then be supplied in `VerifyNotificationChannel` to verify the channel. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetNotificationChannelVerificationCode(self, request, context): - """Requests a verification code for an already verified channel that can then - be used in a call to VerifyNotificationChannel() on a different channel - with an equivalent identity in the same or in a different project. 
This - makes it possible to copy a channel between projects without requiring - manual reverification of the channel. If the channel is not in the - verified state, this method will fail (in other words, this may only be - used if the SendNotificationChannelVerificationCode and - VerifyNotificationChannel paths have already been used to put the given - channel into the verified state). - - There is no guarantee that the verification codes returned by this method - will be of a similar structure or form as the ones that are delivered - to the channel via SendNotificationChannelVerificationCode; while - VerifyNotificationChannel() will recognize both the codes delivered via - SendNotificationChannelVerificationCode() and returned from - GetNotificationChannelVerificationCode(), it is typically the case that - the verification codes delivered via - SendNotificationChannelVerificationCode() will be shorter and also - have a shorter expiration (e.g. codes such as "G-123456") whereas - GetVerificationCode() will typically return a much longer, websafe base - 64 encoded string that has a longer expiration time. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def VerifyNotificationChannel(self, request, context): - """Verifies a `NotificationChannel` by proving receipt of the code - delivered to the channel as a result of calling - `SendNotificationChannelVerificationCode`. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_NotificationChannelServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - "ListNotificationChannelDescriptors": grpc.unary_unary_rpc_method_handler( - servicer.ListNotificationChannelDescriptors, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelDescriptorsRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelDescriptorsResponse.SerializeToString, - ), - "GetNotificationChannelDescriptor": grpc.unary_unary_rpc_method_handler( - servicer.GetNotificationChannelDescriptor, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelDescriptorRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannelDescriptor.SerializeToString, - ), - "ListNotificationChannels": grpc.unary_unary_rpc_method_handler( - servicer.ListNotificationChannels, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelsRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelsResponse.SerializeToString, - ), - "GetNotificationChannel": grpc.unary_unary_rpc_method_handler( - servicer.GetNotificationChannel, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.SerializeToString, - ), - "CreateNotificationChannel": grpc.unary_unary_rpc_method_handler( - 
servicer.CreateNotificationChannel, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.CreateNotificationChannelRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.SerializeToString, - ), - "UpdateNotificationChannel": grpc.unary_unary_rpc_method_handler( - servicer.UpdateNotificationChannel, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.UpdateNotificationChannelRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.SerializeToString, - ), - "DeleteNotificationChannel": grpc.unary_unary_rpc_method_handler( - servicer.DeleteNotificationChannel, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.DeleteNotificationChannelRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "SendNotificationChannelVerificationCode": grpc.unary_unary_rpc_method_handler( - servicer.SendNotificationChannelVerificationCode, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.SendNotificationChannelVerificationCodeRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "GetNotificationChannelVerificationCode": grpc.unary_unary_rpc_method_handler( - servicer.GetNotificationChannelVerificationCode, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelVerificationCodeRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelVerificationCodeResponse.SerializeToString, - ), - "VerifyNotificationChannel": grpc.unary_unary_rpc_method_handler( - servicer.VerifyNotificationChannel, - 
request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.VerifyNotificationChannelRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.monitoring.v3.NotificationChannelService", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/google/cloud/monitoring_v3/proto/service_pb2.py b/google/cloud/monitoring_v3/proto/service_pb2.py deleted file mode 100644 index 9c1ccb69..00000000 --- a/google/cloud/monitoring_v3/proto/service_pb2.py +++ /dev/null @@ -1,2201 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/monitoring_v3/proto/service.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import ( - monitored_resource_pb2 as google_dot_api_dot_monitored__resource__pb2, -) -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.type import calendar_period_pb2 as google_dot_type_dot_calendar__period__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/monitoring_v3/proto/service.proto", - package="google.monitoring.v3", - syntax="proto3", - 
serialized_options=b"\n\030com.google.monitoring.v3B\026ServiceMonitoringProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3\352\002\035Google::Cloud::Monitoring::V3", - serialized_pb=b'\n.google/cloud/monitoring_v3/proto/service.proto\x12\x14google.monitoring.v3\x1a#google/api/monitored_resource.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a!google/type/calendar_period.proto"\x97\x07\n\x07Service\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x36\n\x06\x63ustom\x18\x06 \x01(\x0b\x32$.google.monitoring.v3.Service.CustomH\x00\x12=\n\napp_engine\x18\x07 \x01(\x0b\x32\'.google.monitoring.v3.Service.AppEngineH\x00\x12G\n\x0f\x63loud_endpoints\x18\x08 \x01(\x0b\x32,.google.monitoring.v3.Service.CloudEndpointsH\x00\x12G\n\rcluster_istio\x18\t \x01(\x0b\x32*.google.monitoring.v3.Service.ClusterIstioB\x02\x18\x01H\x00\x12=\n\nmesh_istio\x18\n \x01(\x0b\x32\'.google.monitoring.v3.Service.MeshIstioH\x00\x12:\n\ttelemetry\x18\r \x01(\x0b\x32\'.google.monitoring.v3.Service.Telemetry\x1a\x08\n\x06\x43ustom\x1a\x1e\n\tAppEngine\x12\x11\n\tmodule_id\x18\x01 \x01(\t\x1a!\n\x0e\x43loudEndpoints\x12\x0f\n\x07service\x18\x01 \x01(\t\x1ak\n\x0c\x43lusterIstio\x12\x10\n\x08location\x18\x01 \x01(\t\x12\x14\n\x0c\x63luster_name\x18\x02 \x01(\t\x12\x19\n\x11service_namespace\x18\x03 \x01(\t\x12\x14\n\x0cservice_name\x18\x04 \x01(\t:\x02\x18\x01\x1aN\n\tMeshIstio\x12\x10\n\x08mesh_uid\x18\x01 \x01(\t\x12\x19\n\x11service_namespace\x18\x03 \x01(\t\x12\x14\n\x0cservice_name\x18\x04 \x01(\t\x1a"\n\tTelemetry\x12\x15\n\rresource_name\x18\x01 
\x01(\t:\xa7\x01\xea\x41\xa3\x01\n!monitoring.googleapis.com/Service\x12%projects/{project}/services/{service}\x12/organizations/{organization}/services/{service}\x12#folders/{folder}/services/{service}\x12\x01*B\x0c\n\nidentifier"\x91\x05\n\x15ServiceLevelObjective\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x0b \x01(\t\x12L\n\x17service_level_indicator\x18\x03 \x01(\x0b\x32+.google.monitoring.v3.ServiceLevelIndicator\x12\x0c\n\x04goal\x18\x04 \x01(\x01\x12\x33\n\x0erolling_period\x18\x05 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x36\n\x0f\x63\x61lendar_period\x18\x06 \x01(\x0e\x32\x1b.google.type.CalendarPeriodH\x00"4\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\x08\n\x04\x46ULL\x10\x02\x12\x0c\n\x08\x45XPLICIT\x10\x01:\xca\x02\xea\x41\xc6\x02\n/monitoring.googleapis.com/ServiceLevelObjective\x12Vprojects/{project}/services/{service}/serviceLevelObjectives/{service_level_objective}\x12`organizations/{organization}/services/{service}/serviceLevelObjectives/{service_level_objective}\x12Tfolders/{folder}/services/{service}/serviceLevelObjectives/{service_level_objective}\x12\x01* \x01\x42\x08\n\x06period"\xd4\x01\n\x15ServiceLevelIndicator\x12\x33\n\tbasic_sli\x18\x04 \x01(\x0b\x32\x1e.google.monitoring.v3.BasicSliH\x00\x12>\n\rrequest_based\x18\x01 \x01(\x0b\x32%.google.monitoring.v3.RequestBasedSliH\x00\x12>\n\rwindows_based\x18\x02 \x01(\x0b\x32%.google.monitoring.v3.WindowsBasedSliH\x00\x42\x06\n\x04type"\xb6\x02\n\x08\x42\x61sicSli\x12\x0e\n\x06method\x18\x07 \x03(\t\x12\x10\n\x08location\x18\x08 \x03(\t\x12\x0f\n\x07version\x18\t \x03(\t\x12K\n\x0c\x61vailability\x18\x02 \x01(\x0b\x32\x33.google.monitoring.v3.BasicSli.AvailabilityCriteriaH\x00\x12\x41\n\x07latency\x18\x03 \x01(\x0b\x32..google.monitoring.v3.BasicSli.LatencyCriteriaH\x00\x1a\x16\n\x14\x41vailabilityCriteria\x1a?\n\x0fLatencyCriteria\x12,\n\tthreshold\x18\x03 
\x01(\x0b\x32\x19.google.protobuf.DurationB\x0e\n\x0csli_criteria"!\n\x05Range\x12\x0b\n\x03min\x18\x01 \x01(\x01\x12\x0b\n\x03max\x18\x02 \x01(\x01"\xa1\x01\n\x0fRequestBasedSli\x12\x41\n\x10good_total_ratio\x18\x01 \x01(\x0b\x32%.google.monitoring.v3.TimeSeriesRatioH\x00\x12\x41\n\x10\x64istribution_cut\x18\x03 \x01(\x0b\x32%.google.monitoring.v3.DistributionCutH\x00\x42\x08\n\x06method"h\n\x0fTimeSeriesRatio\x12\x1b\n\x13good_service_filter\x18\x04 \x01(\t\x12\x1a\n\x12\x62\x61\x64_service_filter\x18\x05 \x01(\t\x12\x1c\n\x14total_service_filter\x18\x06 \x01(\t"Z\n\x0f\x44istributionCut\x12\x1b\n\x13\x64istribution_filter\x18\x04 \x01(\t\x12*\n\x05range\x18\x05 \x01(\x0b\x32\x1b.google.monitoring.v3.Range"\x83\x05\n\x0fWindowsBasedSli\x12 \n\x16good_bad_metric_filter\x18\x05 \x01(\tH\x00\x12`\n\x1agood_total_ratio_threshold\x18\x02 \x01(\x0b\x32:.google.monitoring.v3.WindowsBasedSli.PerformanceThresholdH\x00\x12Q\n\x14metric_mean_in_range\x18\x06 \x01(\x0b\x32\x31.google.monitoring.v3.WindowsBasedSli.MetricRangeH\x00\x12P\n\x13metric_sum_in_range\x18\x07 \x01(\x0b\x32\x31.google.monitoring.v3.WindowsBasedSli.MetricRangeH\x00\x12\x30\n\rwindow_period\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x1a\xb0\x01\n\x14PerformanceThreshold\x12<\n\x0bperformance\x18\x01 \x01(\x0b\x32%.google.monitoring.v3.RequestBasedSliH\x00\x12?\n\x15\x62\x61sic_sli_performance\x18\x03 \x01(\x0b\x32\x1e.google.monitoring.v3.BasicSliH\x00\x12\x11\n\tthreshold\x18\x02 \x01(\x01\x42\x06\n\x04type\x1aN\n\x0bMetricRange\x12\x13\n\x0btime_series\x18\x01 \x01(\t\x12*\n\x05range\x18\x04 \x01(\x0b\x32\x1b.google.monitoring.v3.RangeB\x12\n\x10window_criterionB\xce\x01\n\x18\x63om.google.monitoring.v3B\x16ServiceMonitoringProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3\xea\x02\x1dGoogle::Cloud::Monitoring::V3b\x06proto3', - dependencies=[ - 
google_dot_api_dot_monitored__resource__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_type_dot_calendar__period__pb2.DESCRIPTOR, - ], -) - - -_SERVICELEVELOBJECTIVE_VIEW = _descriptor.EnumDescriptor( - name="View", - full_name="google.monitoring.v3.ServiceLevelObjective.View", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="VIEW_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="FULL", index=1, number=2, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="EXPLICIT", index=2, number=1, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1421, - serialized_end=1473, -) -_sym_db.RegisterEnumDescriptor(_SERVICELEVELOBJECTIVE_VIEW) - - -_SERVICE_CUSTOM = _descriptor.Descriptor( - name="Custom", - full_name="google.monitoring.v3.Service.Custom", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=672, - serialized_end=680, -) - -_SERVICE_APPENGINE = _descriptor.Descriptor( - name="AppEngine", - full_name="google.monitoring.v3.Service.AppEngine", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="module_id", - full_name="google.monitoring.v3.Service.AppEngine.module_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - 
nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=682, - serialized_end=712, -) - -_SERVICE_CLOUDENDPOINTS = _descriptor.Descriptor( - name="CloudEndpoints", - full_name="google.monitoring.v3.Service.CloudEndpoints", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="service", - full_name="google.monitoring.v3.Service.CloudEndpoints.service", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=714, - serialized_end=747, -) - -_SERVICE_CLUSTERISTIO = _descriptor.Descriptor( - name="ClusterIstio", - full_name="google.monitoring.v3.Service.ClusterIstio", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="location", - full_name="google.monitoring.v3.Service.ClusterIstio.location", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.monitoring.v3.Service.ClusterIstio.cluster_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - 
file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="service_namespace", - full_name="google.monitoring.v3.Service.ClusterIstio.service_namespace", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="service_name", - full_name="google.monitoring.v3.Service.ClusterIstio.service_name", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"\030\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=749, - serialized_end=856, -) - -_SERVICE_MESHISTIO = _descriptor.Descriptor( - name="MeshIstio", - full_name="google.monitoring.v3.Service.MeshIstio", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="mesh_uid", - full_name="google.monitoring.v3.Service.MeshIstio.mesh_uid", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="service_namespace", - full_name="google.monitoring.v3.Service.MeshIstio.service_namespace", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="service_name", - full_name="google.monitoring.v3.Service.MeshIstio.service_name", - index=2, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=858, - serialized_end=936, -) - -_SERVICE_TELEMETRY = _descriptor.Descriptor( - name="Telemetry", - full_name="google.monitoring.v3.Service.Telemetry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="resource_name", - full_name="google.monitoring.v3.Service.Telemetry.resource_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=938, - serialized_end=972, -) - -_SERVICE = _descriptor.Descriptor( - name="Service", - full_name="google.monitoring.v3.Service", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.Service.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.monitoring.v3.Service.display_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="custom", - full_name="google.monitoring.v3.Service.custom", - index=2, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="app_engine", - full_name="google.monitoring.v3.Service.app_engine", - index=3, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="cloud_endpoints", - full_name="google.monitoring.v3.Service.cloud_endpoints", - index=4, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="cluster_istio", - full_name="google.monitoring.v3.Service.cluster_istio", - index=5, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\030\001", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - 
name="mesh_istio", - full_name="google.monitoring.v3.Service.mesh_istio", - index=6, - number=10, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="telemetry", - full_name="google.monitoring.v3.Service.telemetry", - index=7, - number=13, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[ - _SERVICE_CUSTOM, - _SERVICE_APPENGINE, - _SERVICE_CLOUDENDPOINTS, - _SERVICE_CLUSTERISTIO, - _SERVICE_MESHISTIO, - _SERVICE_TELEMETRY, - ], - enum_types=[], - serialized_options=b"\352A\243\001\n!monitoring.googleapis.com/Service\022%projects/{project}/services/{service}\022/organizations/{organization}/services/{service}\022#folders/{folder}/services/{service}\022\001*", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="identifier", - full_name="google.monitoring.v3.Service.identifier", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=237, - serialized_end=1156, -) - - -_SERVICELEVELOBJECTIVE = _descriptor.Descriptor( - name="ServiceLevelObjective", - full_name="google.monitoring.v3.ServiceLevelObjective", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.ServiceLevelObjective.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.monitoring.v3.ServiceLevelObjective.display_name", - index=1, - number=11, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="service_level_indicator", - full_name="google.monitoring.v3.ServiceLevelObjective.service_level_indicator", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="goal", - full_name="google.monitoring.v3.ServiceLevelObjective.goal", - index=3, - number=4, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="rolling_period", - full_name="google.monitoring.v3.ServiceLevelObjective.rolling_period", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="calendar_period", - full_name="google.monitoring.v3.ServiceLevelObjective.calendar_period", - index=5, - number=6, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_SERVICELEVELOBJECTIVE_VIEW], - serialized_options=b"\352A\306\002\n/monitoring.googleapis.com/ServiceLevelObjective\022Vprojects/{project}/services/{service}/serviceLevelObjectives/{service_level_objective}\022`organizations/{organization}/services/{service}/serviceLevelObjectives/{service_level_objective}\022Tfolders/{folder}/services/{service}/serviceLevelObjectives/{service_level_objective}\022\001* \001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="period", - full_name="google.monitoring.v3.ServiceLevelObjective.period", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=1159, - serialized_end=1816, -) - - -_SERVICELEVELINDICATOR = _descriptor.Descriptor( - name="ServiceLevelIndicator", - full_name="google.monitoring.v3.ServiceLevelIndicator", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="basic_sli", - full_name="google.monitoring.v3.ServiceLevelIndicator.basic_sli", - index=0, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request_based", - full_name="google.monitoring.v3.ServiceLevelIndicator.request_based", - index=1, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="windows_based", - full_name="google.monitoring.v3.ServiceLevelIndicator.windows_based", - index=2, - number=2, - type=11, - cpp_type=10, - 
label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="type", - full_name="google.monitoring.v3.ServiceLevelIndicator.type", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=1819, - serialized_end=2031, -) - - -_BASICSLI_AVAILABILITYCRITERIA = _descriptor.Descriptor( - name="AvailabilityCriteria", - full_name="google.monitoring.v3.BasicSli.AvailabilityCriteria", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2241, - serialized_end=2263, -) - -_BASICSLI_LATENCYCRITERIA = _descriptor.Descriptor( - name="LatencyCriteria", - full_name="google.monitoring.v3.BasicSli.LatencyCriteria", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="threshold", - full_name="google.monitoring.v3.BasicSli.LatencyCriteria.threshold", - index=0, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2265, - serialized_end=2328, -) - -_BASICSLI = _descriptor.Descriptor( - name="BasicSli", - full_name="google.monitoring.v3.BasicSli", - filename=None, - file=DESCRIPTOR, - 
containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="method", - full_name="google.monitoring.v3.BasicSli.method", - index=0, - number=7, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="location", - full_name="google.monitoring.v3.BasicSli.location", - index=1, - number=8, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="version", - full_name="google.monitoring.v3.BasicSli.version", - index=2, - number=9, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="availability", - full_name="google.monitoring.v3.BasicSli.availability", - index=3, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="latency", - full_name="google.monitoring.v3.BasicSli.latency", - index=4, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_BASICSLI_AVAILABILITYCRITERIA, _BASICSLI_LATENCYCRITERIA], - 
enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="sli_criteria", - full_name="google.monitoring.v3.BasicSli.sli_criteria", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=2034, - serialized_end=2344, -) - - -_RANGE = _descriptor.Descriptor( - name="Range", - full_name="google.monitoring.v3.Range", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="min", - full_name="google.monitoring.v3.Range.min", - index=0, - number=1, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="max", - full_name="google.monitoring.v3.Range.max", - index=1, - number=2, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2346, - serialized_end=2379, -) - - -_REQUESTBASEDSLI = _descriptor.Descriptor( - name="RequestBasedSli", - full_name="google.monitoring.v3.RequestBasedSli", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="good_total_ratio", - full_name="google.monitoring.v3.RequestBasedSli.good_total_ratio", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="distribution_cut", - full_name="google.monitoring.v3.RequestBasedSli.distribution_cut", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="method", - full_name="google.monitoring.v3.RequestBasedSli.method", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=2382, - serialized_end=2543, -) - - -_TIMESERIESRATIO = _descriptor.Descriptor( - name="TimeSeriesRatio", - full_name="google.monitoring.v3.TimeSeriesRatio", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="good_service_filter", - full_name="google.monitoring.v3.TimeSeriesRatio.good_service_filter", - index=0, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="bad_service_filter", - full_name="google.monitoring.v3.TimeSeriesRatio.bad_service_filter", - index=1, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="total_service_filter", - full_name="google.monitoring.v3.TimeSeriesRatio.total_service_filter", - index=2, - 
number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2545, - serialized_end=2649, -) - - -_DISTRIBUTIONCUT = _descriptor.Descriptor( - name="DistributionCut", - full_name="google.monitoring.v3.DistributionCut", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="distribution_filter", - full_name="google.monitoring.v3.DistributionCut.distribution_filter", - index=0, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="range", - full_name="google.monitoring.v3.DistributionCut.range", - index=1, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2651, - serialized_end=2741, -) - - -_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD = _descriptor.Descriptor( - name="PerformanceThreshold", - full_name="google.monitoring.v3.WindowsBasedSli.PerformanceThreshold", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="performance", - 
full_name="google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.performance", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="basic_sli_performance", - full_name="google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.basic_sli_performance", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="threshold", - full_name="google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.threshold", - index=2, - number=2, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="type", - full_name="google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.type", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=3111, - serialized_end=3287, -) - -_WINDOWSBASEDSLI_METRICRANGE = _descriptor.Descriptor( - name="MetricRange", - full_name="google.monitoring.v3.WindowsBasedSli.MetricRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="time_series", - full_name="google.monitoring.v3.WindowsBasedSli.MetricRange.time_series", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - 
has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="range", - full_name="google.monitoring.v3.WindowsBasedSli.MetricRange.range", - index=1, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3289, - serialized_end=3367, -) - -_WINDOWSBASEDSLI = _descriptor.Descriptor( - name="WindowsBasedSli", - full_name="google.monitoring.v3.WindowsBasedSli", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="good_bad_metric_filter", - full_name="google.monitoring.v3.WindowsBasedSli.good_bad_metric_filter", - index=0, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="good_total_ratio_threshold", - full_name="google.monitoring.v3.WindowsBasedSli.good_total_ratio_threshold", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="metric_mean_in_range", - full_name="google.monitoring.v3.WindowsBasedSli.metric_mean_in_range", - index=2, 
- number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="metric_sum_in_range", - full_name="google.monitoring.v3.WindowsBasedSli.metric_sum_in_range", - index=3, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="window_period", - full_name="google.monitoring.v3.WindowsBasedSli.window_period", - index=4, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD, _WINDOWSBASEDSLI_METRICRANGE], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="window_criterion", - full_name="google.monitoring.v3.WindowsBasedSli.window_criterion", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=2744, - serialized_end=3387, -) - -_SERVICE_CUSTOM.containing_type = _SERVICE -_SERVICE_APPENGINE.containing_type = _SERVICE -_SERVICE_CLOUDENDPOINTS.containing_type = _SERVICE -_SERVICE_CLUSTERISTIO.containing_type = _SERVICE -_SERVICE_MESHISTIO.containing_type = _SERVICE -_SERVICE_TELEMETRY.containing_type = _SERVICE -_SERVICE.fields_by_name["custom"].message_type = _SERVICE_CUSTOM -_SERVICE.fields_by_name["app_engine"].message_type = _SERVICE_APPENGINE -_SERVICE.fields_by_name["cloud_endpoints"].message_type = 
_SERVICE_CLOUDENDPOINTS -_SERVICE.fields_by_name["cluster_istio"].message_type = _SERVICE_CLUSTERISTIO -_SERVICE.fields_by_name["mesh_istio"].message_type = _SERVICE_MESHISTIO -_SERVICE.fields_by_name["telemetry"].message_type = _SERVICE_TELEMETRY -_SERVICE.oneofs_by_name["identifier"].fields.append(_SERVICE.fields_by_name["custom"]) -_SERVICE.fields_by_name["custom"].containing_oneof = _SERVICE.oneofs_by_name[ - "identifier" -] -_SERVICE.oneofs_by_name["identifier"].fields.append( - _SERVICE.fields_by_name["app_engine"] -) -_SERVICE.fields_by_name["app_engine"].containing_oneof = _SERVICE.oneofs_by_name[ - "identifier" -] -_SERVICE.oneofs_by_name["identifier"].fields.append( - _SERVICE.fields_by_name["cloud_endpoints"] -) -_SERVICE.fields_by_name["cloud_endpoints"].containing_oneof = _SERVICE.oneofs_by_name[ - "identifier" -] -_SERVICE.oneofs_by_name["identifier"].fields.append( - _SERVICE.fields_by_name["cluster_istio"] -) -_SERVICE.fields_by_name["cluster_istio"].containing_oneof = _SERVICE.oneofs_by_name[ - "identifier" -] -_SERVICE.oneofs_by_name["identifier"].fields.append( - _SERVICE.fields_by_name["mesh_istio"] -) -_SERVICE.fields_by_name["mesh_istio"].containing_oneof = _SERVICE.oneofs_by_name[ - "identifier" -] -_SERVICELEVELOBJECTIVE.fields_by_name[ - "service_level_indicator" -].message_type = _SERVICELEVELINDICATOR -_SERVICELEVELOBJECTIVE.fields_by_name[ - "rolling_period" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_SERVICELEVELOBJECTIVE.fields_by_name[ - "calendar_period" -].enum_type = google_dot_type_dot_calendar__period__pb2._CALENDARPERIOD -_SERVICELEVELOBJECTIVE_VIEW.containing_type = _SERVICELEVELOBJECTIVE -_SERVICELEVELOBJECTIVE.oneofs_by_name["period"].fields.append( - _SERVICELEVELOBJECTIVE.fields_by_name["rolling_period"] -) -_SERVICELEVELOBJECTIVE.fields_by_name[ - "rolling_period" -].containing_oneof = _SERVICELEVELOBJECTIVE.oneofs_by_name["period"] -_SERVICELEVELOBJECTIVE.oneofs_by_name["period"].fields.append( - 
_SERVICELEVELOBJECTIVE.fields_by_name["calendar_period"] -) -_SERVICELEVELOBJECTIVE.fields_by_name[ - "calendar_period" -].containing_oneof = _SERVICELEVELOBJECTIVE.oneofs_by_name["period"] -_SERVICELEVELINDICATOR.fields_by_name["basic_sli"].message_type = _BASICSLI -_SERVICELEVELINDICATOR.fields_by_name["request_based"].message_type = _REQUESTBASEDSLI -_SERVICELEVELINDICATOR.fields_by_name["windows_based"].message_type = _WINDOWSBASEDSLI -_SERVICELEVELINDICATOR.oneofs_by_name["type"].fields.append( - _SERVICELEVELINDICATOR.fields_by_name["basic_sli"] -) -_SERVICELEVELINDICATOR.fields_by_name[ - "basic_sli" -].containing_oneof = _SERVICELEVELINDICATOR.oneofs_by_name["type"] -_SERVICELEVELINDICATOR.oneofs_by_name["type"].fields.append( - _SERVICELEVELINDICATOR.fields_by_name["request_based"] -) -_SERVICELEVELINDICATOR.fields_by_name[ - "request_based" -].containing_oneof = _SERVICELEVELINDICATOR.oneofs_by_name["type"] -_SERVICELEVELINDICATOR.oneofs_by_name["type"].fields.append( - _SERVICELEVELINDICATOR.fields_by_name["windows_based"] -) -_SERVICELEVELINDICATOR.fields_by_name[ - "windows_based" -].containing_oneof = _SERVICELEVELINDICATOR.oneofs_by_name["type"] -_BASICSLI_AVAILABILITYCRITERIA.containing_type = _BASICSLI -_BASICSLI_LATENCYCRITERIA.fields_by_name[ - "threshold" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_BASICSLI_LATENCYCRITERIA.containing_type = _BASICSLI -_BASICSLI.fields_by_name["availability"].message_type = _BASICSLI_AVAILABILITYCRITERIA -_BASICSLI.fields_by_name["latency"].message_type = _BASICSLI_LATENCYCRITERIA -_BASICSLI.oneofs_by_name["sli_criteria"].fields.append( - _BASICSLI.fields_by_name["availability"] -) -_BASICSLI.fields_by_name["availability"].containing_oneof = _BASICSLI.oneofs_by_name[ - "sli_criteria" -] -_BASICSLI.oneofs_by_name["sli_criteria"].fields.append( - _BASICSLI.fields_by_name["latency"] -) -_BASICSLI.fields_by_name["latency"].containing_oneof = _BASICSLI.oneofs_by_name[ - "sli_criteria" -] 
-_REQUESTBASEDSLI.fields_by_name["good_total_ratio"].message_type = _TIMESERIESRATIO -_REQUESTBASEDSLI.fields_by_name["distribution_cut"].message_type = _DISTRIBUTIONCUT -_REQUESTBASEDSLI.oneofs_by_name["method"].fields.append( - _REQUESTBASEDSLI.fields_by_name["good_total_ratio"] -) -_REQUESTBASEDSLI.fields_by_name[ - "good_total_ratio" -].containing_oneof = _REQUESTBASEDSLI.oneofs_by_name["method"] -_REQUESTBASEDSLI.oneofs_by_name["method"].fields.append( - _REQUESTBASEDSLI.fields_by_name["distribution_cut"] -) -_REQUESTBASEDSLI.fields_by_name[ - "distribution_cut" -].containing_oneof = _REQUESTBASEDSLI.oneofs_by_name["method"] -_DISTRIBUTIONCUT.fields_by_name["range"].message_type = _RANGE -_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.fields_by_name[ - "performance" -].message_type = _REQUESTBASEDSLI -_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.fields_by_name[ - "basic_sli_performance" -].message_type = _BASICSLI -_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.containing_type = _WINDOWSBASEDSLI -_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.oneofs_by_name["type"].fields.append( - _WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.fields_by_name["performance"] -) -_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.fields_by_name[ - "performance" -].containing_oneof = _WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.oneofs_by_name["type"] -_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.oneofs_by_name["type"].fields.append( - _WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.fields_by_name["basic_sli_performance"] -) -_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.fields_by_name[ - "basic_sli_performance" -].containing_oneof = _WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.oneofs_by_name["type"] -_WINDOWSBASEDSLI_METRICRANGE.fields_by_name["range"].message_type = _RANGE -_WINDOWSBASEDSLI_METRICRANGE.containing_type = _WINDOWSBASEDSLI -_WINDOWSBASEDSLI.fields_by_name[ - "good_total_ratio_threshold" -].message_type = _WINDOWSBASEDSLI_PERFORMANCETHRESHOLD -_WINDOWSBASEDSLI.fields_by_name[ - "metric_mean_in_range" -].message_type = _WINDOWSBASEDSLI_METRICRANGE 
-_WINDOWSBASEDSLI.fields_by_name[ - "metric_sum_in_range" -].message_type = _WINDOWSBASEDSLI_METRICRANGE -_WINDOWSBASEDSLI.fields_by_name[ - "window_period" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_WINDOWSBASEDSLI.oneofs_by_name["window_criterion"].fields.append( - _WINDOWSBASEDSLI.fields_by_name["good_bad_metric_filter"] -) -_WINDOWSBASEDSLI.fields_by_name[ - "good_bad_metric_filter" -].containing_oneof = _WINDOWSBASEDSLI.oneofs_by_name["window_criterion"] -_WINDOWSBASEDSLI.oneofs_by_name["window_criterion"].fields.append( - _WINDOWSBASEDSLI.fields_by_name["good_total_ratio_threshold"] -) -_WINDOWSBASEDSLI.fields_by_name[ - "good_total_ratio_threshold" -].containing_oneof = _WINDOWSBASEDSLI.oneofs_by_name["window_criterion"] -_WINDOWSBASEDSLI.oneofs_by_name["window_criterion"].fields.append( - _WINDOWSBASEDSLI.fields_by_name["metric_mean_in_range"] -) -_WINDOWSBASEDSLI.fields_by_name[ - "metric_mean_in_range" -].containing_oneof = _WINDOWSBASEDSLI.oneofs_by_name["window_criterion"] -_WINDOWSBASEDSLI.oneofs_by_name["window_criterion"].fields.append( - _WINDOWSBASEDSLI.fields_by_name["metric_sum_in_range"] -) -_WINDOWSBASEDSLI.fields_by_name[ - "metric_sum_in_range" -].containing_oneof = _WINDOWSBASEDSLI.oneofs_by_name["window_criterion"] -DESCRIPTOR.message_types_by_name["Service"] = _SERVICE -DESCRIPTOR.message_types_by_name["ServiceLevelObjective"] = _SERVICELEVELOBJECTIVE -DESCRIPTOR.message_types_by_name["ServiceLevelIndicator"] = _SERVICELEVELINDICATOR -DESCRIPTOR.message_types_by_name["BasicSli"] = _BASICSLI -DESCRIPTOR.message_types_by_name["Range"] = _RANGE -DESCRIPTOR.message_types_by_name["RequestBasedSli"] = _REQUESTBASEDSLI -DESCRIPTOR.message_types_by_name["TimeSeriesRatio"] = _TIMESERIESRATIO -DESCRIPTOR.message_types_by_name["DistributionCut"] = _DISTRIBUTIONCUT -DESCRIPTOR.message_types_by_name["WindowsBasedSli"] = _WINDOWSBASEDSLI -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Service = 
_reflection.GeneratedProtocolMessageType( - "Service", - (_message.Message,), - { - "Custom": _reflection.GeneratedProtocolMessageType( - "Custom", - (_message.Message,), - { - "DESCRIPTOR": _SERVICE_CUSTOM, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """Custom view of service telemetry. Currently a place-holder - pending final design. - - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.Service.Custom) - }, - ), - "AppEngine": _reflection.GeneratedProtocolMessageType( - "AppEngine", - (_message.Message,), - { - "DESCRIPTOR": _SERVICE_APPENGINE, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """App Engine service. Learn more at - https://cloud.google.com/appengine. - - - Attributes: - module_id: - The ID of the App Engine module underlying this service. - Corresponds to the ``module_id`` resource label in the - ``gae_app`` monitored resource: - https://cloud.google.com/monitoring/api/resources#tag_gae_app - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.Service.AppEngine) - }, - ), - "CloudEndpoints": _reflection.GeneratedProtocolMessageType( - "CloudEndpoints", - (_message.Message,), - { - "DESCRIPTOR": _SERVICE_CLOUDENDPOINTS, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """Cloud Endpoints service. Learn more at - https://cloud.google.com/endpoints. - - - Attributes: - service: - The name of the Cloud Endpoints service underlying this - service. 
Corresponds to the ``service`` resource label in the - ``api`` monitored resource: - https://cloud.google.com/monitoring/api/resources#tag_api - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.Service.CloudEndpoints) - }, - ), - "ClusterIstio": _reflection.GeneratedProtocolMessageType( - "ClusterIstio", - (_message.Message,), - { - "DESCRIPTOR": _SERVICE_CLUSTERISTIO, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """Istio service scoped to a single Kubernetes cluster. Learn - more at http://istio.io. - - - Attributes: - location: - The location of the Kubernetes cluster in which this Istio - service is defined. Corresponds to the ``location`` resource - label in ``k8s_cluster`` resources. - cluster_name: - The name of the Kubernetes cluster in which this Istio service - is defined. Corresponds to the ``cluster_name`` resource label - in ``k8s_cluster`` resources. - service_namespace: - The namespace of the Istio service underlying this service. - Corresponds to the ``destination_service_namespace`` metric - label in Istio metrics. - service_name: - The name of the Istio service underlying this service. - Corresponds to the ``destination_service_name`` metric label - in Istio metrics. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.Service.ClusterIstio) - }, - ), - "MeshIstio": _reflection.GeneratedProtocolMessageType( - "MeshIstio", - (_message.Message,), - { - "DESCRIPTOR": _SERVICE_MESHISTIO, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """Istio service scoped to an Istio mesh - - - Attributes: - mesh_uid: - Identifier for the mesh in which this Istio service is - defined. Corresponds to the ``mesh_uid`` metric label in Istio - metrics. - service_namespace: - The namespace of the Istio service underlying this service. - Corresponds to the ``destination_service_namespace`` metric - label in Istio metrics. 
- service_name: - The name of the Istio service underlying this service. - Corresponds to the ``destination_service_name`` metric label - in Istio metrics. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.Service.MeshIstio) - }, - ), - "Telemetry": _reflection.GeneratedProtocolMessageType( - "Telemetry", - (_message.Message,), - { - "DESCRIPTOR": _SERVICE_TELEMETRY, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """Configuration for how to query telemetry on a Service. - - - Attributes: - resource_name: - The full name of the resource that defines this service. - Formatted as described in - https://cloud.google.com/apis/design/resource_names. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.Service.Telemetry) - }, - ), - "DESCRIPTOR": _SERVICE, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """A ``Service`` is a discrete, autonomous, and - network-accessible unit, designed to solve an individual concern - (`Wikipedia `__). In - Cloud Monitoring, a ``Service`` acts as the root resource under which - operational aspects of the service are accessible. - - - Attributes: - name: - Resource name for this Service. The format is: :: - projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] - display_name: - Name used for UI elements listing this Service. - identifier: - REQUIRED. Service-identifying atoms specifying the underlying - service. - custom: - Custom service type. - app_engine: - Type used for App Engine services. - cloud_endpoints: - Type used for Cloud Endpoints services. - cluster_istio: - Type used for Istio services that live in a Kubernetes - cluster. - mesh_istio: - Type used for Istio services scoped to an Istio mesh. - telemetry: - Configuration for how to query telemetry on a Service. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.Service) - }, -) -_sym_db.RegisterMessage(Service) -_sym_db.RegisterMessage(Service.Custom) -_sym_db.RegisterMessage(Service.AppEngine) -_sym_db.RegisterMessage(Service.CloudEndpoints) -_sym_db.RegisterMessage(Service.ClusterIstio) -_sym_db.RegisterMessage(Service.MeshIstio) -_sym_db.RegisterMessage(Service.Telemetry) - -ServiceLevelObjective = _reflection.GeneratedProtocolMessageType( - "ServiceLevelObjective", - (_message.Message,), - { - "DESCRIPTOR": _SERVICELEVELOBJECTIVE, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """A Service-Level Objective (SLO) describes a level of - desired good service. It consists of a service-level indicator (SLI), a - performance goal, and a period over which the objective is to be - evaluated against that goal. The SLO can use SLIs defined in a number of - different manners. Typical SLOs might include “99% of requests in each - rolling week have latency below 200 milliseconds” or “99.5% of requests - in each calendar month return successfully.” - - - Attributes: - name: - Resource name for this ``ServiceLevelObjective``. The format - is: :: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] - display_name: - Name used for UI elements listing this SLO. - service_level_indicator: - The definition of good service, used to measure and calculate - the quality of the ``Service``\ ’s performance with respect to - a single aspect of service quality. - goal: - The fraction of service that must be good in order for this - objective to be met. ``0 < goal <= 0.999``. - period: - The time period over which the objective will be evaluated. - rolling_period: - A rolling time period, semantically “in the past - ````”. Must be an integer multiple of 1 day no - larger than 30 days. - calendar_period: - A calendar period, semantically “since the start of the - current ````”. 
At this time, only ``DAY``, - ``WEEK``, ``FORTNIGHT``, and ``MONTH`` are supported. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ServiceLevelObjective) - }, -) -_sym_db.RegisterMessage(ServiceLevelObjective) - -ServiceLevelIndicator = _reflection.GeneratedProtocolMessageType( - "ServiceLevelIndicator", - (_message.Message,), - { - "DESCRIPTOR": _SERVICELEVELINDICATOR, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """A Service-Level Indicator (SLI) describes the - “performance” of a service. For some services, the SLI is well-defined. - In such cases, the SLI can be described easily by referencing the - well-known SLI and providing the needed parameters. Alternatively, a - “custom” SLI can be defined with a query to the underlying metric store. - An SLI is defined to be ``good_service / total_service`` over any - queried time interval. The value of performance always falls into the - range ``0 <= performance <= 1``. A custom SLI describes how to compute - this ratio, whether this is by dividing values from a pair of time - series, cutting a ``Distribution`` into good and bad counts, or counting - time windows in which the service complies with a criterion. For - separation of concerns, a single Service-Level Indicator measures - performance for only one aspect of service quality, such as fraction of - successful queries or fast-enough queries. - - - Attributes: - type: - Service level indicators can be grouped by whether the “unit” - of service being measured is based on counts of good requests - or on counts of good time windows - basic_sli: - Basic SLI on a well-known service type. 
- request_based: - Request-based SLIs - windows_based: - Windows-based SLIs - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ServiceLevelIndicator) - }, -) -_sym_db.RegisterMessage(ServiceLevelIndicator) - -BasicSli = _reflection.GeneratedProtocolMessageType( - "BasicSli", - (_message.Message,), - { - "AvailabilityCriteria": _reflection.GeneratedProtocolMessageType( - "AvailabilityCriteria", - (_message.Message,), - { - "DESCRIPTOR": _BASICSLI_AVAILABILITYCRITERIA, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """Future parameters for the availability SLI. - - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.BasicSli.AvailabilityCriteria) - }, - ), - "LatencyCriteria": _reflection.GeneratedProtocolMessageType( - "LatencyCriteria", - (_message.Message,), - { - "DESCRIPTOR": _BASICSLI_LATENCYCRITERIA, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """Parameters for a latency threshold SLI. - - - Attributes: - threshold: - Good service is defined to be the count of requests made to - this service that return in no more than ``threshold``. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.BasicSli.LatencyCriteria) - }, - ), - "DESCRIPTOR": _BASICSLI, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """An SLI measuring performance on a well-known service type. - Performance will be computed on the basis of pre-defined metrics. The - type of the ``service_resource`` determines the metrics to use and the - ``service_resource.labels`` and ``metric_labels`` are used to construct - a monitoring filter to filter that metric down to just the data relevant - to this service. - - - Attributes: - method: - OPTIONAL: The set of RPCs to which this SLI is relevant. - Telemetry from other methods will not be used to calculate - performance for this SLI. If omitted, this SLI applies to all - the Service’s methods. 
For service types that don’t support - breaking down by method, setting this field will result in an - error. - location: - OPTIONAL: The set of locations to which this SLI is relevant. - Telemetry from other locations will not be used to calculate - performance for this SLI. If omitted, this SLI applies to all - locations in which the Service has activity. For service types - that don’t support breaking down by location, setting this - field will result in an error. - version: - OPTIONAL: The set of API versions to which this SLI is - relevant. Telemetry from other API versions will not be used - to calculate performance for this SLI. If omitted, this SLI - applies to all API versions. For service types that don’t - support breaking down by version, setting this field will - result in an error. - sli_criteria: - This SLI can be evaluated on the basis of availability or - latency. - availability: - Good service is defined to be the count of requests made to - this service that return successfully. - latency: - Good service is defined to be the count of requests made to - this service that are fast enough with respect to - ``latency.threshold``. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.BasicSli) - }, -) -_sym_db.RegisterMessage(BasicSli) -_sym_db.RegisterMessage(BasicSli.AvailabilityCriteria) -_sym_db.RegisterMessage(BasicSli.LatencyCriteria) - -Range = _reflection.GeneratedProtocolMessageType( - "Range", - (_message.Message,), - { - "DESCRIPTOR": _RANGE, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """Range of numerical values, inclusive of ``min`` and - exclusive of ``max``. If the open range “< range.max” is desired, set - ``range.min = -infinity``. If the open range “>= range.min” is desired, - set ``range.max = infinity``. - - - Attributes: - min: - Range minimum. - max: - Range maximum. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.Range) - }, -) -_sym_db.RegisterMessage(Range) - -RequestBasedSli = _reflection.GeneratedProtocolMessageType( - "RequestBasedSli", - (_message.Message,), - { - "DESCRIPTOR": _REQUESTBASEDSLI, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """Service Level Indicators for which atomic units of service - are counted directly. - - - Attributes: - method: - The means to compute a ratio of ``good_service`` to - ``total_service``. - good_total_ratio: - \ ``good_total_ratio`` is used when the ratio of - ``good_service`` to ``total_service`` is computed from two - ``TimeSeries``. - distribution_cut: - \ ``distribution_cut`` is used when ``good_service`` is a - count of values aggregated in a ``Distribution`` that fall - into a good range. The ``total_service`` is the total count of - all values aggregated in the ``Distribution``. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.RequestBasedSli) - }, -) -_sym_db.RegisterMessage(RequestBasedSli) - -TimeSeriesRatio = _reflection.GeneratedProtocolMessageType( - "TimeSeriesRatio", - (_message.Message,), - { - "DESCRIPTOR": _TIMESERIESRATIO, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """A ``TimeSeriesRatio`` specifies two ``TimeSeries`` to use - for computing the ``good_service / total_service`` ratio. The specified - ``TimeSeries`` must have ``ValueType = DOUBLE`` or ``ValueType = INT64`` - and must have ``MetricKind = DELTA`` or ``MetricKind = CUMULATIVE``. The - ``TimeSeriesRatio`` must specify exactly two of good, bad, and total, - and the relationship ``good_service + bad_service = total_service`` will - be assumed. - - - Attributes: - good_service_filter: - A `monitoring filter - `__ - specifying a ``TimeSeries`` quantifying good service provided. 
- Must have ``ValueType = DOUBLE`` or ``ValueType = INT64`` and - must have ``MetricKind = DELTA`` or ``MetricKind = - CUMULATIVE``. - bad_service_filter: - A `monitoring filter - `__ - specifying a ``TimeSeries`` quantifying bad service, either - demanded service that was not provided or demanded service - that was of inadequate quality. Must have ``ValueType = - DOUBLE`` or ``ValueType = INT64`` and must have ``MetricKind = - DELTA`` or ``MetricKind = CUMULATIVE``. - total_service_filter: - A `monitoring filter - `__ - specifying a ``TimeSeries`` quantifying total demanded - service. Must have ``ValueType = DOUBLE`` or ``ValueType = - INT64`` and must have ``MetricKind = DELTA`` or ``MetricKind = - CUMULATIVE``. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.TimeSeriesRatio) - }, -) -_sym_db.RegisterMessage(TimeSeriesRatio) - -DistributionCut = _reflection.GeneratedProtocolMessageType( - "DistributionCut", - (_message.Message,), - { - "DESCRIPTOR": _DISTRIBUTIONCUT, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """A ``DistributionCut`` defines a ``TimeSeries`` and - thresholds used for measuring good service and total service. The - ``TimeSeries`` must have ``ValueType = DISTRIBUTION`` and - ``MetricKind = DELTA`` or ``MetricKind = CUMULATIVE``. The computed - ``good_service`` will be the count of values x in the ``Distribution`` - such that ``range.min <= x < range.max``. - - - Attributes: - distribution_filter: - A `monitoring filter - `__ - specifying a ``TimeSeries`` aggregating values. Must have - ``ValueType = DISTRIBUTION`` and ``MetricKind = DELTA`` or - ``MetricKind = CUMULATIVE``. - range: - Range of values considered “good.” For a one-sided range, set - one bound to an infinite value. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.DistributionCut) - }, -) -_sym_db.RegisterMessage(DistributionCut) - -WindowsBasedSli = _reflection.GeneratedProtocolMessageType( - "WindowsBasedSli", - (_message.Message,), - { - "PerformanceThreshold": _reflection.GeneratedProtocolMessageType( - "PerformanceThreshold", - (_message.Message,), - { - "DESCRIPTOR": _WINDOWSBASEDSLI_PERFORMANCETHRESHOLD, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """A ``PerformanceThreshold`` is used when each window is - good when that window has a sufficiently high ``performance``. - - - Attributes: - type: - The means, either a request-based SLI or a basic SLI, by which - to compute performance over a window. - performance: - \ ``RequestBasedSli`` to evaluate to judge window quality. - basic_sli_performance: - \ ``BasicSli`` to evaluate to judge window quality. - threshold: - If window ``performance >= threshold``, the window is counted - as good. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.WindowsBasedSli.PerformanceThreshold) - }, - ), - "MetricRange": _reflection.GeneratedProtocolMessageType( - "MetricRange", - (_message.Message,), - { - "DESCRIPTOR": _WINDOWSBASEDSLI_METRICRANGE, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """A ``MetricRange`` is used when each window is good when - the value x of a single ``TimeSeries`` satisfies - ``range.min <= x < range.max``. The provided ``TimeSeries`` must have - ``ValueType = INT64`` or ``ValueType = DOUBLE`` and - ``MetricKind = GAUGE``. - - - Attributes: - time_series: - A `monitoring filter - `__ - specifying the ``TimeSeries`` to use for evaluating window - quality. - range: - Range of values considered “good.” For a one-sided range, set - one bound to an infinite value. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.WindowsBasedSli.MetricRange) - }, - ), - "DESCRIPTOR": _WINDOWSBASEDSLI, - "__module__": "google.cloud.monitoring_v3.proto.service_pb2", - "__doc__": """A ``WindowsBasedSli`` defines ``good_service`` as the - count of time windows for which the provided service was of good - quality. Criteria for determining if service was good are embedded in - the ``window_criterion``. - - - Attributes: - window_criterion: - The criterion to use for evaluating window goodness. - good_bad_metric_filter: - A `monitoring filter - `__ - specifying a ``TimeSeries`` with ``ValueType = BOOL``. The - window is good if any ``true`` values appear in the window. - good_total_ratio_threshold: - A window is good if its ``performance`` is high enough. - metric_mean_in_range: - A window is good if the metric’s value is in a good range, - averaged across returned streams. - metric_sum_in_range: - A window is good if the metric’s value is in a good range, - summed across returned streams. - window_period: - Duration over which window quality is evaluated. Must be an - integer fraction of a day and at least ``60s``. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.WindowsBasedSli) - }, -) -_sym_db.RegisterMessage(WindowsBasedSli) -_sym_db.RegisterMessage(WindowsBasedSli.PerformanceThreshold) -_sym_db.RegisterMessage(WindowsBasedSli.MetricRange) - - -DESCRIPTOR._options = None -_SERVICE_CLUSTERISTIO._options = None -_SERVICE.fields_by_name["cluster_istio"]._options = None -_SERVICE._options = None -_SERVICELEVELOBJECTIVE._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/monitoring_v3/proto/service_pb2_grpc.py b/google/cloud/monitoring_v3/proto/service_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/monitoring_v3/proto/service_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-import grpc diff --git a/google/cloud/monitoring_v3/proto/service_service_pb2.py b/google/cloud/monitoring_v3/proto/service_service_pb2.py deleted file mode 100644 index 036cfec3..00000000 --- a/google/cloud/monitoring_v3/proto/service_service_pb2.py +++ /dev/null @@ -1,1301 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/monitoring_v3/proto/service_service.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.monitoring_v3.proto import ( - service_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/monitoring_v3/proto/service_service.proto", - package="google.monitoring.v3", - syntax="proto3", - serialized_options=b"\n\030com.google.monitoring.v3B\035ServiceMonitoringServiceProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3\352\002\035Google::Cloud::Monitoring::V3", - 
serialized_pb=b'\n6google/cloud/monitoring_v3/proto/service_service.proto\x12\x14google.monitoring.v3\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a.google/cloud/monitoring_v3/proto/service.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto"\x9a\x01\n\x14\x43reateServiceRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\x12!monitoring.googleapis.com/Service\x12\x12\n\nservice_id\x18\x03 \x01(\t\x12\x33\n\x07service\x18\x02 \x01(\x0b\x32\x1d.google.monitoring.v3.ServiceB\x03\xe0\x41\x02"L\n\x11GetServiceRequest\x12\x37\n\x04name\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!monitoring.googleapis.com/Service"\x87\x01\n\x13ListServicesRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\x12!monitoring.googleapis.com/Service\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t"`\n\x14ListServicesResponse\x12/\n\x08services\x18\x01 \x03(\x0b\x32\x1d.google.monitoring.v3.Service\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"|\n\x14UpdateServiceRequest\x12\x33\n\x07service\x18\x01 \x01(\x0b\x32\x1d.google.monitoring.v3.ServiceB\x03\xe0\x41\x02\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"O\n\x14\x44\x65leteServiceRequest\x12\x37\n\x04name\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!monitoring.googleapis.com/Service"\xd6\x01\n"CreateServiceLevelObjectiveRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!monitoring.googleapis.com/Service\x12"\n\x1aservice_level_objective_id\x18\x03 \x01(\t\x12Q\n\x17service_level_objective\x18\x02 \x01(\x0b\x32+.google.monitoring.v3.ServiceLevelObjectiveB\x03\xe0\x41\x02"\xa8\x01\n\x1fGetServiceLevelObjectiveRequest\x12\x45\n\x04name\x18\x01 \x01(\tB7\xe0\x41\x02\xfa\x41\x31\n/monitoring.googleapis.com/ServiceLevelObjective\x12>\n\x04view\x18\x02 
\x01(\x0e\x32\x30.google.monitoring.v3.ServiceLevelObjective.View"\xd5\x01\n!ListServiceLevelObjectivesRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!monitoring.googleapis.com/Service\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t\x12>\n\x04view\x18\x05 \x01(\x0e\x32\x30.google.monitoring.v3.ServiceLevelObjective.View"\x8c\x01\n"ListServiceLevelObjectivesResponse\x12M\n\x18service_level_objectives\x18\x01 \x03(\x0b\x32+.google.monitoring.v3.ServiceLevelObjective\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\xa8\x01\n"UpdateServiceLevelObjectiveRequest\x12Q\n\x17service_level_objective\x18\x01 \x01(\x0b\x32+.google.monitoring.v3.ServiceLevelObjectiveB\x03\xe0\x41\x02\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"k\n"DeleteServiceLevelObjectiveRequest\x12\x45\n\x04name\x18\x01 \x01(\tB7\xe0\x41\x02\xfa\x41\x31\n/monitoring.googleapis.com/ServiceLevelObjective2\xea\x0f\n\x18ServiceMonitoringService\x12\x97\x01\n\rCreateService\x12*.google.monitoring.v3.CreateServiceRequest\x1a\x1d.google.monitoring.v3.Service";\x82\xd3\xe4\x93\x02$"\x19/v3/{parent=*/*}/services:\x07service\xda\x41\x0eparent,service\x12~\n\nGetService\x12\'.google.monitoring.v3.GetServiceRequest\x1a\x1d.google.monitoring.v3.Service"(\x82\xd3\xe4\x93\x02\x1b\x12\x19/v3/{name=*/*/services/*}\xda\x41\x04name\x12\x91\x01\n\x0cListServices\x12).google.monitoring.v3.ListServicesRequest\x1a*.google.monitoring.v3.ListServicesResponse"*\x82\xd3\xe4\x93\x02\x1b\x12\x19/v3/{parent=*/*}/services\xda\x41\x06parent\x12\x98\x01\n\rUpdateService\x12*.google.monitoring.v3.UpdateServiceRequest\x1a\x1d.google.monitoring.v3.Service"<\x82\xd3\xe4\x93\x02,2!/v3/{service.name=*/*/services/*}:\x07service\xda\x41\x07service\x12}\n\rDeleteService\x12*.google.monitoring.v3.DeleteServiceRequest\x1a\x16.google.protobuf.Empty"(\x82\xd3\xe4\x93\x02\x1b*\x19/v3/{name=*/*/services/*}\xda\x41\x04name\x12\xfa\x01\n\x1b\x
43reateServiceLevelObjective\x12\x38.google.monitoring.v3.CreateServiceLevelObjectiveRequest\x1a+.google.monitoring.v3.ServiceLevelObjective"t\x82\xd3\xe4\x93\x02M"2/v3/{parent=*/*/services/*}/serviceLevelObjectives:\x17service_level_objective\xda\x41\x1eparent,service_level_objective\x12\xc1\x01\n\x18GetServiceLevelObjective\x12\x35.google.monitoring.v3.GetServiceLevelObjectiveRequest\x1a+.google.monitoring.v3.ServiceLevelObjective"A\x82\xd3\xe4\x93\x02\x34\x12\x32/v3/{name=*/*/services/*/serviceLevelObjectives/*}\xda\x41\x04name\x12\xd4\x01\n\x1aListServiceLevelObjectives\x12\x37.google.monitoring.v3.ListServiceLevelObjectivesRequest\x1a\x38.google.monitoring.v3.ListServiceLevelObjectivesResponse"C\x82\xd3\xe4\x93\x02\x34\x12\x32/v3/{parent=*/*/services/*}/serviceLevelObjectives\xda\x41\x06parent\x12\x8c\x02\n\x1bUpdateServiceLevelObjective\x12\x38.google.monitoring.v3.UpdateServiceLevelObjectiveRequest\x1a+.google.monitoring.v3.ServiceLevelObjective"\x85\x01\x82\xd3\xe4\x93\x02\x65\x32J/v3/{service_level_objective.name=*/*/services/*/serviceLevelObjectives/*}:\x17service_level_objective\xda\x41\x17service_level_objective\x12\xb2\x01\n\x1b\x44\x65leteServiceLevelObjective\x12\x38.google.monitoring.v3.DeleteServiceLevelObjectiveRequest\x1a\x16.google.protobuf.Empty"A\x82\xd3\xe4\x93\x02\x34*2/v3/{name=*/*/services/*/serviceLevelObjectives/*}\xda\x41\x04name\x1a\xa9\x01\xca\x41\x19monitoring.googleapis.com\xd2\x41\x89\x01https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/monitoring,https://www.googleapis.com/auth/monitoring.readB\xd5\x01\n\x18\x63om.google.monitoring.v3B\x1dServiceMonitoringServiceProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3\xea\x02\x1dGoogle::Cloud::Monitoring::V3b\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - 
google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - ], -) - - -_CREATESERVICEREQUEST = _descriptor.Descriptor( - name="CreateServiceRequest", - full_name="google.monitoring.v3.CreateServiceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.monitoring.v3.CreateServiceRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A#\022!monitoring.googleapis.com/Service", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="service_id", - full_name="google.monitoring.v3.CreateServiceRequest.service_id", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="service", - full_name="google.monitoring.v3.CreateServiceRequest.service", - index=2, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=307, - serialized_end=461, -) - - -_GETSERVICEREQUEST = 
_descriptor.Descriptor( - name="GetServiceRequest", - full_name="google.monitoring.v3.GetServiceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.GetServiceRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A#\n!monitoring.googleapis.com/Service", - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=463, - serialized_end=539, -) - - -_LISTSERVICESREQUEST = _descriptor.Descriptor( - name="ListServicesRequest", - full_name="google.monitoring.v3.ListServicesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.monitoring.v3.ListServicesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A#\022!monitoring.googleapis.com/Service", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.monitoring.v3.ListServicesRequest.filter", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.monitoring.v3.ListServicesRequest.page_size", - index=2, 
- number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.monitoring.v3.ListServicesRequest.page_token", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=542, - serialized_end=677, -) - - -_LISTSERVICESRESPONSE = _descriptor.Descriptor( - name="ListServicesResponse", - full_name="google.monitoring.v3.ListServicesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="services", - full_name="google.monitoring.v3.ListServicesResponse.services", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.monitoring.v3.ListServicesResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", 
- extension_ranges=[], - oneofs=[], - serialized_start=679, - serialized_end=775, -) - - -_UPDATESERVICEREQUEST = _descriptor.Descriptor( - name="UpdateServiceRequest", - full_name="google.monitoring.v3.UpdateServiceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="service", - full_name="google.monitoring.v3.UpdateServiceRequest.service", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.monitoring.v3.UpdateServiceRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=777, - serialized_end=901, -) - - -_DELETESERVICEREQUEST = _descriptor.Descriptor( - name="DeleteServiceRequest", - full_name="google.monitoring.v3.DeleteServiceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.DeleteServiceRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A#\n!monitoring.googleapis.com/Service", - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], 
- serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=903, - serialized_end=982, -) - - -_CREATESERVICELEVELOBJECTIVEREQUEST = _descriptor.Descriptor( - name="CreateServiceLevelObjectiveRequest", - full_name="google.monitoring.v3.CreateServiceLevelObjectiveRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.monitoring.v3.CreateServiceLevelObjectiveRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A#\n!monitoring.googleapis.com/Service", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="service_level_objective_id", - full_name="google.monitoring.v3.CreateServiceLevelObjectiveRequest.service_level_objective_id", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="service_level_objective", - full_name="google.monitoring.v3.CreateServiceLevelObjectiveRequest.service_level_objective", - index=2, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=985, - serialized_end=1199, -) - - 
-_GETSERVICELEVELOBJECTIVEREQUEST = _descriptor.Descriptor( - name="GetServiceLevelObjectiveRequest", - full_name="google.monitoring.v3.GetServiceLevelObjectiveRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.GetServiceLevelObjectiveRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A1\n/monitoring.googleapis.com/ServiceLevelObjective", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="view", - full_name="google.monitoring.v3.GetServiceLevelObjectiveRequest.view", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1202, - serialized_end=1370, -) - - -_LISTSERVICELEVELOBJECTIVESREQUEST = _descriptor.Descriptor( - name="ListServiceLevelObjectivesRequest", - full_name="google.monitoring.v3.ListServiceLevelObjectivesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.monitoring.v3.ListServiceLevelObjectivesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A#\n!monitoring.googleapis.com/Service", - 
file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.monitoring.v3.ListServiceLevelObjectivesRequest.filter", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.monitoring.v3.ListServiceLevelObjectivesRequest.page_size", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.monitoring.v3.ListServiceLevelObjectivesRequest.page_token", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="view", - full_name="google.monitoring.v3.ListServiceLevelObjectivesRequest.view", - index=4, - number=5, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1373, - serialized_end=1586, -) - - -_LISTSERVICELEVELOBJECTIVESRESPONSE = _descriptor.Descriptor( - name="ListServiceLevelObjectivesResponse", - 
full_name="google.monitoring.v3.ListServiceLevelObjectivesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="service_level_objectives", - full_name="google.monitoring.v3.ListServiceLevelObjectivesResponse.service_level_objectives", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.monitoring.v3.ListServiceLevelObjectivesResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1589, - serialized_end=1729, -) - - -_UPDATESERVICELEVELOBJECTIVEREQUEST = _descriptor.Descriptor( - name="UpdateServiceLevelObjectiveRequest", - full_name="google.monitoring.v3.UpdateServiceLevelObjectiveRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="service_level_objective", - full_name="google.monitoring.v3.UpdateServiceLevelObjectiveRequest.service_level_objective", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update_mask", - 
full_name="google.monitoring.v3.UpdateServiceLevelObjectiveRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1732, - serialized_end=1900, -) - - -_DELETESERVICELEVELOBJECTIVEREQUEST = _descriptor.Descriptor( - name="DeleteServiceLevelObjectiveRequest", - full_name="google.monitoring.v3.DeleteServiceLevelObjectiveRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.DeleteServiceLevelObjectiveRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A1\n/monitoring.googleapis.com/ServiceLevelObjective", - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1902, - serialized_end=2009, -) - -_CREATESERVICEREQUEST.fields_by_name[ - "service" -].message_type = google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2._SERVICE -_LISTSERVICESRESPONSE.fields_by_name[ - "services" -].message_type = google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2._SERVICE -_UPDATESERVICEREQUEST.fields_by_name[ - "service" -].message_type = google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2._SERVICE -_UPDATESERVICEREQUEST.fields_by_name[ - "update_mask" -].message_type = 
google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_CREATESERVICELEVELOBJECTIVEREQUEST.fields_by_name[ - "service_level_objective" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2._SERVICELEVELOBJECTIVE -) -_GETSERVICELEVELOBJECTIVEREQUEST.fields_by_name[ - "view" -].enum_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2._SERVICELEVELOBJECTIVE_VIEW -) -_LISTSERVICELEVELOBJECTIVESREQUEST.fields_by_name[ - "view" -].enum_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2._SERVICELEVELOBJECTIVE_VIEW -) -_LISTSERVICELEVELOBJECTIVESRESPONSE.fields_by_name[ - "service_level_objectives" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2._SERVICELEVELOBJECTIVE -) -_UPDATESERVICELEVELOBJECTIVEREQUEST.fields_by_name[ - "service_level_objective" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2._SERVICELEVELOBJECTIVE -) -_UPDATESERVICELEVELOBJECTIVEREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -DESCRIPTOR.message_types_by_name["CreateServiceRequest"] = _CREATESERVICEREQUEST -DESCRIPTOR.message_types_by_name["GetServiceRequest"] = _GETSERVICEREQUEST -DESCRIPTOR.message_types_by_name["ListServicesRequest"] = _LISTSERVICESREQUEST -DESCRIPTOR.message_types_by_name["ListServicesResponse"] = _LISTSERVICESRESPONSE -DESCRIPTOR.message_types_by_name["UpdateServiceRequest"] = _UPDATESERVICEREQUEST -DESCRIPTOR.message_types_by_name["DeleteServiceRequest"] = _DELETESERVICEREQUEST -DESCRIPTOR.message_types_by_name[ - "CreateServiceLevelObjectiveRequest" -] = _CREATESERVICELEVELOBJECTIVEREQUEST -DESCRIPTOR.message_types_by_name[ - "GetServiceLevelObjectiveRequest" -] = _GETSERVICELEVELOBJECTIVEREQUEST -DESCRIPTOR.message_types_by_name[ - "ListServiceLevelObjectivesRequest" -] = _LISTSERVICELEVELOBJECTIVESREQUEST -DESCRIPTOR.message_types_by_name[ - 
"ListServiceLevelObjectivesResponse" -] = _LISTSERVICELEVELOBJECTIVESRESPONSE -DESCRIPTOR.message_types_by_name[ - "UpdateServiceLevelObjectiveRequest" -] = _UPDATESERVICELEVELOBJECTIVEREQUEST -DESCRIPTOR.message_types_by_name[ - "DeleteServiceLevelObjectiveRequest" -] = _DELETESERVICELEVELOBJECTIVEREQUEST -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -CreateServiceRequest = _reflection.GeneratedProtocolMessageType( - "CreateServiceRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATESERVICEREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.service_service_pb2", - "__doc__": """The ``CreateService`` request. - - - Attributes: - parent: - Required. Resource name of the parent workspace. The format - is: :: projects/[PROJECT_ID_OR_NUMBER] - service_id: - Optional. The Service id to use for this Service. If omitted, - an id will be generated instead. Must match the pattern - ``[a-z0-9\-]+`` - service: - Required. The ``Service`` to create. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.CreateServiceRequest) - }, -) -_sym_db.RegisterMessage(CreateServiceRequest) - -GetServiceRequest = _reflection.GeneratedProtocolMessageType( - "GetServiceRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETSERVICEREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.service_service_pb2", - "__doc__": """The ``GetService`` request. - - - Attributes: - name: - Required. Resource name of the ``Service``. The format is: :: - projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.GetServiceRequest) - }, -) -_sym_db.RegisterMessage(GetServiceRequest) - -ListServicesRequest = _reflection.GeneratedProtocolMessageType( - "ListServicesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTSERVICESREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.service_service_pb2", - "__doc__": """The ``ListServices`` request. - - - Attributes: - parent: - Required. 
Resource name of the parent containing the listed - services, either a project or a Monitoring Workspace. The - formats are: :: projects/[PROJECT_ID_OR_NUMBER] - workspaces/[HOST_PROJECT_ID_OR_NUMBER] - filter: - A filter specifying what ``Service``\ s to return. The filter - currently supports the following fields: :: - - `identifier_case` - `app_engine.module_id` - - `cloud_endpoints.service` - `cluster_istio.location` - - `cluster_istio.cluster_name` - - `cluster_istio.service_namespace` - - `cluster_istio.service_name` ``identifier_case`` refers to - which option in the identifier oneof is populated. For - example, the filter ``identifier_case = "CUSTOM"`` would match - all services with a value for the ``custom`` field. Valid - options are “CUSTOM”, “APP_ENGINE”, “CLOUD_ENDPOINTS”, and - “CLUSTER_ISTIO”. - page_size: - A non-negative number that is the maximum number of results to - return. When 0, use default page size. - page_token: - If this field is not empty then it must contain the - ``nextPageToken`` value returned by a previous call to this - method. Using this field causes the method to return - additional results from the previous method call. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListServicesRequest) - }, -) -_sym_db.RegisterMessage(ListServicesRequest) - -ListServicesResponse = _reflection.GeneratedProtocolMessageType( - "ListServicesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTSERVICESRESPONSE, - "__module__": "google.cloud.monitoring_v3.proto.service_service_pb2", - "__doc__": """The ``ListServices`` response. - - - Attributes: - services: - The ``Service``\ s matching the specified filter. - next_page_token: - If there are more results than have been returned, then this - field is set to a non-empty value. To see the additional - results, use that value as ``page_token`` in the next call to - this method. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListServicesResponse) - }, -) -_sym_db.RegisterMessage(ListServicesResponse) - -UpdateServiceRequest = _reflection.GeneratedProtocolMessageType( - "UpdateServiceRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATESERVICEREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.service_service_pb2", - "__doc__": """The ``UpdateService`` request. - - - Attributes: - service: - Required. The ``Service`` to draw updates from. The given - ``name`` specifies the resource to update. - update_mask: - A set of field paths defining which fields to use for the - update. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.UpdateServiceRequest) - }, -) -_sym_db.RegisterMessage(UpdateServiceRequest) - -DeleteServiceRequest = _reflection.GeneratedProtocolMessageType( - "DeleteServiceRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETESERVICEREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.service_service_pb2", - "__doc__": """The ``DeleteService`` request. - - - Attributes: - name: - Required. Resource name of the ``Service`` to delete. The - format is: :: - projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.DeleteServiceRequest) - }, -) -_sym_db.RegisterMessage(DeleteServiceRequest) - -CreateServiceLevelObjectiveRequest = _reflection.GeneratedProtocolMessageType( - "CreateServiceLevelObjectiveRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATESERVICELEVELOBJECTIVEREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.service_service_pb2", - "__doc__": """The ``CreateServiceLevelObjective`` request. - - - Attributes: - parent: - Required. Resource name of the parent ``Service``. The format - is: :: - projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] - service_level_objective_id: - Optional. The ServiceLevelObjective id to use for this - ServiceLevelObjective. 
If omitted, an id will be generated - instead. Must match the pattern ``[a-z0-9\-]+`` - service_level_objective: - Required. The ``ServiceLevelObjective`` to create. The - provided ``name`` will be respected if no - ``ServiceLevelObjective`` exists with this name. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.CreateServiceLevelObjectiveRequest) - }, -) -_sym_db.RegisterMessage(CreateServiceLevelObjectiveRequest) - -GetServiceLevelObjectiveRequest = _reflection.GeneratedProtocolMessageType( - "GetServiceLevelObjectiveRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETSERVICELEVELOBJECTIVEREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.service_service_pb2", - "__doc__": """The ``GetServiceLevelObjective`` request. - - - Attributes: - name: - Required. Resource name of the ``ServiceLevelObjective`` to - get. The format is: :: projects/[PROJECT_ID_OR_NUMBER]/se - rvices/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] - view: - View of the ``ServiceLevelObjective`` to return. If - ``DEFAULT``, return the ``ServiceLevelObjective`` as - originally defined. If ``EXPLICIT`` and the - ``ServiceLevelObjective`` is defined in terms of a - ``BasicSli``, replace the ``BasicSli`` with a - ``RequestBasedSli`` spelling out how the SLI is computed. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.GetServiceLevelObjectiveRequest) - }, -) -_sym_db.RegisterMessage(GetServiceLevelObjectiveRequest) - -ListServiceLevelObjectivesRequest = _reflection.GeneratedProtocolMessageType( - "ListServiceLevelObjectivesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTSERVICELEVELOBJECTIVESREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.service_service_pb2", - "__doc__": """The ``ListServiceLevelObjectives`` request. - - - Attributes: - parent: - Required. Resource name of the parent containing the listed - SLOs, either a project or a Monitoring Workspace. 
The formats - are: :: - projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] - workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/- - filter: - A filter specifying what ``ServiceLevelObjective``\ s to - return. - page_size: - A non-negative number that is the maximum number of results to - return. When 0, use default page size. - page_token: - If this field is not empty then it must contain the - ``nextPageToken`` value returned by a previous call to this - method. Using this field causes the method to return - additional results from the previous method call. - view: - View of the ``ServiceLevelObjective``\ s to return. If - ``DEFAULT``, return each ``ServiceLevelObjective`` as - originally defined. If ``EXPLICIT`` and the - ``ServiceLevelObjective`` is defined in terms of a - ``BasicSli``, replace the ``BasicSli`` with a - ``RequestBasedSli`` spelling out how the SLI is computed. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListServiceLevelObjectivesRequest) - }, -) -_sym_db.RegisterMessage(ListServiceLevelObjectivesRequest) - -ListServiceLevelObjectivesResponse = _reflection.GeneratedProtocolMessageType( - "ListServiceLevelObjectivesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTSERVICELEVELOBJECTIVESRESPONSE, - "__module__": "google.cloud.monitoring_v3.proto.service_service_pb2", - "__doc__": """The ``ListServiceLevelObjectives`` response. - - - Attributes: - service_level_objectives: - The ``ServiceLevelObjective``\ s matching the specified - filter. - next_page_token: - If there are more results than have been returned, then this - field is set to a non-empty value. To see the additional - results, use that value as ``page_token`` in the next call to - this method. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListServiceLevelObjectivesResponse) - }, -) -_sym_db.RegisterMessage(ListServiceLevelObjectivesResponse) - -UpdateServiceLevelObjectiveRequest = _reflection.GeneratedProtocolMessageType( - "UpdateServiceLevelObjectiveRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATESERVICELEVELOBJECTIVEREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.service_service_pb2", - "__doc__": """The ``UpdateServiceLevelObjective`` request. - - - Attributes: - service_level_objective: - Required. The ``ServiceLevelObjective`` to draw updates from. - The given ``name`` specifies the resource to update. - update_mask: - A set of field paths defining which fields to use for the - update. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.UpdateServiceLevelObjectiveRequest) - }, -) -_sym_db.RegisterMessage(UpdateServiceLevelObjectiveRequest) - -DeleteServiceLevelObjectiveRequest = _reflection.GeneratedProtocolMessageType( - "DeleteServiceLevelObjectiveRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETESERVICELEVELOBJECTIVEREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.service_service_pb2", - "__doc__": """The ``DeleteServiceLevelObjective`` request. - - - Attributes: - name: - Required. Resource name of the ``ServiceLevelObjective`` to - delete. 
The format is: :: projects/[PROJECT_ID_OR_NUMBER] - /services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.DeleteServiceLevelObjectiveRequest) - }, -) -_sym_db.RegisterMessage(DeleteServiceLevelObjectiveRequest) - - -DESCRIPTOR._options = None -_CREATESERVICEREQUEST.fields_by_name["parent"]._options = None -_CREATESERVICEREQUEST.fields_by_name["service"]._options = None -_GETSERVICEREQUEST.fields_by_name["name"]._options = None -_LISTSERVICESREQUEST.fields_by_name["parent"]._options = None -_UPDATESERVICEREQUEST.fields_by_name["service"]._options = None -_DELETESERVICEREQUEST.fields_by_name["name"]._options = None -_CREATESERVICELEVELOBJECTIVEREQUEST.fields_by_name["parent"]._options = None -_CREATESERVICELEVELOBJECTIVEREQUEST.fields_by_name[ - "service_level_objective" -]._options = None -_GETSERVICELEVELOBJECTIVEREQUEST.fields_by_name["name"]._options = None -_LISTSERVICELEVELOBJECTIVESREQUEST.fields_by_name["parent"]._options = None -_UPDATESERVICELEVELOBJECTIVEREQUEST.fields_by_name[ - "service_level_objective" -]._options = None -_DELETESERVICELEVELOBJECTIVEREQUEST.fields_by_name["name"]._options = None - -_SERVICEMONITORINGSERVICE = _descriptor.ServiceDescriptor( - name="ServiceMonitoringService", - full_name="google.monitoring.v3.ServiceMonitoringService", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\031monitoring.googleapis.com\322A\211\001https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/monitoring,https://www.googleapis.com/auth/monitoring.read", - serialized_start=2012, - serialized_end=4038, - methods=[ - _descriptor.MethodDescriptor( - name="CreateService", - full_name="google.monitoring.v3.ServiceMonitoringService.CreateService", - index=0, - containing_service=None, - input_type=_CREATESERVICEREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2._SERVICE, - 
serialized_options=b'\202\323\344\223\002$"\031/v3/{parent=*/*}/services:\007service\332A\016parent,service', - ), - _descriptor.MethodDescriptor( - name="GetService", - full_name="google.monitoring.v3.ServiceMonitoringService.GetService", - index=1, - containing_service=None, - input_type=_GETSERVICEREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2._SERVICE, - serialized_options=b"\202\323\344\223\002\033\022\031/v3/{name=*/*/services/*}\332A\004name", - ), - _descriptor.MethodDescriptor( - name="ListServices", - full_name="google.monitoring.v3.ServiceMonitoringService.ListServices", - index=2, - containing_service=None, - input_type=_LISTSERVICESREQUEST, - output_type=_LISTSERVICESRESPONSE, - serialized_options=b"\202\323\344\223\002\033\022\031/v3/{parent=*/*}/services\332A\006parent", - ), - _descriptor.MethodDescriptor( - name="UpdateService", - full_name="google.monitoring.v3.ServiceMonitoringService.UpdateService", - index=3, - containing_service=None, - input_type=_UPDATESERVICEREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2._SERVICE, - serialized_options=b"\202\323\344\223\002,2!/v3/{service.name=*/*/services/*}:\007service\332A\007service", - ), - _descriptor.MethodDescriptor( - name="DeleteService", - full_name="google.monitoring.v3.ServiceMonitoringService.DeleteService", - index=4, - containing_service=None, - input_type=_DELETESERVICEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002\033*\031/v3/{name=*/*/services/*}\332A\004name", - ), - _descriptor.MethodDescriptor( - name="CreateServiceLevelObjective", - full_name="google.monitoring.v3.ServiceMonitoringService.CreateServiceLevelObjective", - index=5, - containing_service=None, - input_type=_CREATESERVICELEVELOBJECTIVEREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2._SERVICELEVELOBJECTIVE, - 
serialized_options=b'\202\323\344\223\002M"2/v3/{parent=*/*/services/*}/serviceLevelObjectives:\027service_level_objective\332A\036parent,service_level_objective', - ), - _descriptor.MethodDescriptor( - name="GetServiceLevelObjective", - full_name="google.monitoring.v3.ServiceMonitoringService.GetServiceLevelObjective", - index=6, - containing_service=None, - input_type=_GETSERVICELEVELOBJECTIVEREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2._SERVICELEVELOBJECTIVE, - serialized_options=b"\202\323\344\223\0024\0222/v3/{name=*/*/services/*/serviceLevelObjectives/*}\332A\004name", - ), - _descriptor.MethodDescriptor( - name="ListServiceLevelObjectives", - full_name="google.monitoring.v3.ServiceMonitoringService.ListServiceLevelObjectives", - index=7, - containing_service=None, - input_type=_LISTSERVICELEVELOBJECTIVESREQUEST, - output_type=_LISTSERVICELEVELOBJECTIVESRESPONSE, - serialized_options=b"\202\323\344\223\0024\0222/v3/{parent=*/*/services/*}/serviceLevelObjectives\332A\006parent", - ), - _descriptor.MethodDescriptor( - name="UpdateServiceLevelObjective", - full_name="google.monitoring.v3.ServiceMonitoringService.UpdateServiceLevelObjective", - index=8, - containing_service=None, - input_type=_UPDATESERVICELEVELOBJECTIVEREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2._SERVICELEVELOBJECTIVE, - serialized_options=b"\202\323\344\223\002e2J/v3/{service_level_objective.name=*/*/services/*/serviceLevelObjectives/*}:\027service_level_objective\332A\027service_level_objective", - ), - _descriptor.MethodDescriptor( - name="DeleteServiceLevelObjective", - full_name="google.monitoring.v3.ServiceMonitoringService.DeleteServiceLevelObjective", - index=9, - containing_service=None, - input_type=_DELETESERVICELEVELOBJECTIVEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\0024*2/v3/{name=*/*/services/*/serviceLevelObjectives/*}\332A\004name", - 
), - ], -) -_sym_db.RegisterServiceDescriptor(_SERVICEMONITORINGSERVICE) - -DESCRIPTOR.services_by_name["ServiceMonitoringService"] = _SERVICEMONITORINGSERVICE - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/monitoring_v3/proto/service_service_pb2_grpc.py b/google/cloud/monitoring_v3/proto/service_service_pb2_grpc.py deleted file mode 100644 index d9bc2479..00000000 --- a/google/cloud/monitoring_v3/proto/service_service_pb2_grpc.py +++ /dev/null @@ -1,212 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - -from google.cloud.monitoring_v3.proto import ( - service_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2, -) -from google.cloud.monitoring_v3.proto import ( - service_service_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class ServiceMonitoringServiceStub(object): - """The Cloud Monitoring Service-Oriented Monitoring API has endpoints for - managing and querying aspects of a workspace's services. These include the - `Service`'s monitored resources, its Service-Level Objectives, and a taxonomy - of categorized Health Metrics. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreateService = channel.unary_unary( - "/google.monitoring.v3.ServiceMonitoringService/CreateService", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.CreateServiceRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2.Service.FromString, - ) - self.GetService = channel.unary_unary( - "/google.monitoring.v3.ServiceMonitoringService/GetService", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.GetServiceRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2.Service.FromString, - ) - self.ListServices = channel.unary_unary( - "/google.monitoring.v3.ServiceMonitoringService/ListServices", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.ListServicesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.ListServicesResponse.FromString, - ) - self.UpdateService = channel.unary_unary( - "/google.monitoring.v3.ServiceMonitoringService/UpdateService", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.UpdateServiceRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2.Service.FromString, - ) - self.DeleteService = channel.unary_unary( - "/google.monitoring.v3.ServiceMonitoringService/DeleteService", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.DeleteServiceRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.CreateServiceLevelObjective = channel.unary_unary( - "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective", - 
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.CreateServiceLevelObjectiveRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2.ServiceLevelObjective.FromString, - ) - self.GetServiceLevelObjective = channel.unary_unary( - "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.GetServiceLevelObjectiveRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2.ServiceLevelObjective.FromString, - ) - self.ListServiceLevelObjectives = channel.unary_unary( - "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.ListServiceLevelObjectivesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.ListServiceLevelObjectivesResponse.FromString, - ) - self.UpdateServiceLevelObjective = channel.unary_unary( - "/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.UpdateServiceLevelObjectiveRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2.ServiceLevelObjective.FromString, - ) - self.DeleteServiceLevelObjective = channel.unary_unary( - "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.DeleteServiceLevelObjectiveRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - - -class ServiceMonitoringServiceServicer(object): - """The Cloud Monitoring Service-Oriented Monitoring API has 
endpoints for - managing and querying aspects of a workspace's services. These include the - `Service`'s monitored resources, its Service-Level Objectives, and a taxonomy - of categorized Health Metrics. - """ - - def CreateService(self, request, context): - """Create a `Service`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetService(self, request, context): - """Get the named `Service`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListServices(self, request, context): - """List `Service`s for this workspace. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateService(self, request, context): - """Update this `Service`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteService(self, request, context): - """Soft delete this `Service`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateServiceLevelObjective(self, request, context): - """Create a `ServiceLevelObjective` for the given `Service`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetServiceLevelObjective(self, request, context): - """Get a `ServiceLevelObjective` by name. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListServiceLevelObjectives(self, request, context): - """List the `ServiceLevelObjective`s for the given `Service`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateServiceLevelObjective(self, request, context): - """Update the given `ServiceLevelObjective`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteServiceLevelObjective(self, request, context): - """Delete the given `ServiceLevelObjective`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_ServiceMonitoringServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateService": grpc.unary_unary_rpc_method_handler( - servicer.CreateService, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.CreateServiceRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2.Service.SerializeToString, - ), - "GetService": grpc.unary_unary_rpc_method_handler( - servicer.GetService, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.GetServiceRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2.Service.SerializeToString, - ), - "ListServices": grpc.unary_unary_rpc_method_handler( - servicer.ListServices, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.ListServicesRequest.FromString, - 
response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.ListServicesResponse.SerializeToString, - ), - "UpdateService": grpc.unary_unary_rpc_method_handler( - servicer.UpdateService, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.UpdateServiceRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2.Service.SerializeToString, - ), - "DeleteService": grpc.unary_unary_rpc_method_handler( - servicer.DeleteService, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.DeleteServiceRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "CreateServiceLevelObjective": grpc.unary_unary_rpc_method_handler( - servicer.CreateServiceLevelObjective, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.CreateServiceLevelObjectiveRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2.ServiceLevelObjective.SerializeToString, - ), - "GetServiceLevelObjective": grpc.unary_unary_rpc_method_handler( - servicer.GetServiceLevelObjective, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.GetServiceLevelObjectiveRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2.ServiceLevelObjective.SerializeToString, - ), - "ListServiceLevelObjectives": grpc.unary_unary_rpc_method_handler( - servicer.ListServiceLevelObjectives, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.ListServiceLevelObjectivesRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.ListServiceLevelObjectivesResponse.SerializeToString, - ), - "UpdateServiceLevelObjective": grpc.unary_unary_rpc_method_handler( - 
servicer.UpdateServiceLevelObjective, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.UpdateServiceLevelObjectiveRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__pb2.ServiceLevelObjective.SerializeToString, - ), - "DeleteServiceLevelObjective": grpc.unary_unary_rpc_method_handler( - servicer.DeleteServiceLevelObjective, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_service__service__pb2.DeleteServiceLevelObjectiveRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.monitoring.v3.ServiceMonitoringService", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/google/cloud/monitoring_v3/proto/span_context_pb2.py b/google/cloud/monitoring_v3/proto/span_context_pb2.py deleted file mode 100644 index 9e916e9c..00000000 --- a/google/cloud/monitoring_v3/proto/span_context_pb2.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/monitoring_v3/proto/span_context.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/monitoring_v3/proto/span_context.proto", - package="google.monitoring.v3", - syntax="proto3", - serialized_options=b"\n\030com.google.monitoring.v3B\020SpanContextProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3\352\002\035Google::Cloud::Monitoring::V3", - serialized_pb=b'\n3google/cloud/monitoring_v3/proto/span_context.proto\x12\x14google.monitoring.v3" \n\x0bSpanContext\x12\x11\n\tspan_name\x18\x01 \x01(\tB\xc8\x01\n\x18\x63om.google.monitoring.v3B\x10SpanContextProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3\xea\x02\x1dGoogle::Cloud::Monitoring::V3b\x06proto3', -) - - -_SPANCONTEXT = _descriptor.Descriptor( - name="SpanContext", - full_name="google.monitoring.v3.SpanContext", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="span_name", - full_name="google.monitoring.v3.SpanContext.span_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=77, - 
serialized_end=109, -) - -DESCRIPTOR.message_types_by_name["SpanContext"] = _SPANCONTEXT -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -SpanContext = _reflection.GeneratedProtocolMessageType( - "SpanContext", - (_message.Message,), - { - "DESCRIPTOR": _SPANCONTEXT, - "__module__": "google.cloud.monitoring_v3.proto.span_context_pb2", - "__doc__": """The context of a span, attached to - [Exemplars][google.api.Distribution.Exemplars] in - [Distribution][google.api.Distribution] values during aggregation. - - It contains the name of a span with format: - - :: - - projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] - - - Attributes: - span_name: - The resource name of the span. The format is: :: projects - /[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] - ``[TRACE_ID]`` is a unique identifier for a trace within a - project; it is a 32-character hexadecimal encoding of a - 16-byte array. ``[SPAN_ID]`` is a unique identifier for a - span within a trace; it is a 16-character hexadecimal encoding - of an 8-byte array. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.SpanContext) - }, -) -_sym_db.RegisterMessage(SpanContext) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/monitoring_v3/proto/span_context_pb2_grpc.py b/google/cloud/monitoring_v3/proto/span_context_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/monitoring_v3/proto/span_context_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/monitoring_v3/proto/uptime_pb2.py b/google/cloud/monitoring_v3/proto/uptime_pb2.py deleted file mode 100644 index bc3389b9..00000000 --- a/google/cloud/monitoring_v3/proto/uptime_pb2.py +++ /dev/null @@ -1,1576 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/monitoring_v3/proto/uptime.proto - -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import ( - monitored_resource_pb2 as google_dot_api_dot_monitored__resource__pb2, -) -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/monitoring_v3/proto/uptime.proto", - package="google.monitoring.v3", - syntax="proto3", - serialized_options=b"\n\030com.google.monitoring.v3B\013UptimeProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3\352\002\035Google::Cloud::Monitoring::V3", - serialized_pb=b'\n-google/cloud/monitoring_v3/proto/uptime.proto\x12\x14google.monitoring.v3\x1a#google/api/monitored_resource.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/duration.proto"\xe6\x01\n\x0fInternalChecker\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x0f\n\x07network\x18\x03 \x01(\t\x12\x10\n\x08gcp_zone\x18\x04 \x01(\t\x12\x17\n\x0fpeer_project_id\x18\x06 \x01(\t\x12:\n\x05state\x18\x07 \x01(\x0e\x32+.google.monitoring.v3.InternalChecker.State"3\n\x05State\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02:\x02\x18\x01"\xfb\x0f\n\x11UptimeCheckConfig\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12;\n\x12monitored_resource\x18\x03 \x01(\x0b\x32\x1d.google.api.MonitoredResourceH\x00\x12O\n\x0eresource_group\x18\x04 
\x01(\x0b\x32\x35.google.monitoring.v3.UptimeCheckConfig.ResourceGroupH\x00\x12G\n\nhttp_check\x18\x05 \x01(\x0b\x32\x31.google.monitoring.v3.UptimeCheckConfig.HttpCheckH\x01\x12\x45\n\ttcp_check\x18\x06 \x01(\x0b\x32\x30.google.monitoring.v3.UptimeCheckConfig.TcpCheckH\x01\x12)\n\x06period\x18\x07 \x01(\x0b\x32\x19.google.protobuf.Duration\x12*\n\x07timeout\x18\x08 \x01(\x0b\x32\x19.google.protobuf.Duration\x12P\n\x10\x63ontent_matchers\x18\t \x03(\x0b\x32\x36.google.monitoring.v3.UptimeCheckConfig.ContentMatcher\x12\x41\n\x10selected_regions\x18\n \x03(\x0e\x32\'.google.monitoring.v3.UptimeCheckRegion\x12\x17\n\x0bis_internal\x18\x0f \x01(\x08\x42\x02\x18\x01\x12\x44\n\x11internal_checkers\x18\x0e \x03(\x0b\x32%.google.monitoring.v3.InternalCheckerB\x02\x18\x01\x1a\x61\n\rResourceGroup\x12\x10\n\x08group_id\x18\x01 \x01(\t\x12>\n\rresource_type\x18\x02 \x01(\x0e\x32\'.google.monitoring.v3.GroupResourceType\x1a\xa8\x05\n\tHttpCheck\x12W\n\x0erequest_method\x18\x08 \x01(\x0e\x32?.google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod\x12\x0f\n\x07use_ssl\x18\x01 \x01(\x08\x12\x0c\n\x04path\x18\x02 \x01(\t\x12\x0c\n\x04port\x18\x03 \x01(\x05\x12X\n\tauth_info\x18\x04 \x01(\x0b\x32\x45.google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication\x12\x14\n\x0cmask_headers\x18\x05 \x01(\x08\x12O\n\x07headers\x18\x06 \x03(\x0b\x32>.google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry\x12S\n\x0c\x63ontent_type\x18\t \x01(\x0e\x32=.google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType\x12\x14\n\x0cvalidate_ssl\x18\x07 \x01(\x08\x12\x0c\n\x04\x62ody\x18\n \x01(\x0c\x1a\x39\n\x13\x42\x61sicAuthentication\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x10\n\x08password\x18\x02 \x01(\t\x1a.\n\x0cHeadersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01":\n\rRequestMethod\x12\x16\n\x12METHOD_UNSPECIFIED\x10\x00\x12\x07\n\x03GET\x10\x01\x12\x08\n\x04POST\x10\x02"4\n\x0b\x43ontentType\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0f\n\x0bURL_ENCODED\x10\x01\x1a\x18\n\x08TcpCheck\x12\x0c\n\x04port\x18\x01 \x01(\x05\x1a\x98\x02\n\x0e\x43ontentMatcher\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\t\x12\\\n\x07matcher\x18\x02 \x01(\x0e\x32K.google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption"\x96\x01\n\x14\x43ontentMatcherOption\x12&\n"CONTENT_MATCHER_OPTION_UNSPECIFIED\x10\x00\x12\x13\n\x0f\x43ONTAINS_STRING\x10\x01\x12\x17\n\x13NOT_CONTAINS_STRING\x10\x02\x12\x11\n\rMATCHES_REGEX\x10\x03\x12\x15\n\x11NOT_MATCHES_REGEX\x10\x04:\xf3\x01\xea\x41\xef\x01\n+monitoring.googleapis.com/UptimeCheckConfig\x12;projects/{project}/uptimeCheckConfigs/{uptime_check_config}\x12\x45organizations/{organization}/uptimeCheckConfigs/{uptime_check_config}\x12\x39\x66olders/{folder}/uptimeCheckConfigs/{uptime_check_config}\x12\x01*B\n\n\x08resourceB\x14\n\x12\x63heck_request_type"n\n\rUptimeCheckIp\x12\x37\n\x06region\x18\x01 \x01(\x0e\x32\'.google.monitoring.v3.UptimeCheckRegion\x12\x10\n\x08location\x18\x02 \x01(\t\x12\x12\n\nip_address\x18\x03 \x01(\t*e\n\x11UptimeCheckRegion\x12\x16\n\x12REGION_UNSPECIFIED\x10\x00\x12\x07\n\x03USA\x10\x01\x12\n\n\x06\x45UROPE\x10\x02\x12\x11\n\rSOUTH_AMERICA\x10\x03\x12\x10\n\x0c\x41SIA_PACIFIC\x10\x04*[\n\x11GroupResourceType\x12\x1d\n\x19RESOURCE_TYPE_UNSPECIFIED\x10\x00\x12\x0c\n\x08INSTANCE\x10\x01\x12\x19\n\x15\x41WS_ELB_LOAD_BALANCER\x10\x02\x42\xc3\x01\n\x18\x63om.google.monitoring.v3B\x0bUptimeProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3\xea\x02\x1dGoogle::Cloud::Monitoring::V3b\x06proto3', - dependencies=[ - google_dot_api_dot_monitored__resource__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - 
google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - ], -) - -_UPTIMECHECKREGION = _descriptor.EnumDescriptor( - name="UptimeCheckRegion", - full_name="google.monitoring.v3.UptimeCheckRegion", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="REGION_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="USA", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="EUROPE", index=2, number=2, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="SOUTH_AMERICA", index=3, number=3, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ASIA_PACIFIC", index=4, number=4, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2558, - serialized_end=2659, -) -_sym_db.RegisterEnumDescriptor(_UPTIMECHECKREGION) - -UptimeCheckRegion = enum_type_wrapper.EnumTypeWrapper(_UPTIMECHECKREGION) -_GROUPRESOURCETYPE = _descriptor.EnumDescriptor( - name="GroupResourceType", - full_name="google.monitoring.v3.GroupResourceType", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="RESOURCE_TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="INSTANCE", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="AWS_ELB_LOAD_BALANCER", - index=2, - number=2, - serialized_options=None, - type=None, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2661, - serialized_end=2752, -) -_sym_db.RegisterEnumDescriptor(_GROUPRESOURCETYPE) - -GroupResourceType = enum_type_wrapper.EnumTypeWrapper(_GROUPRESOURCETYPE) -REGION_UNSPECIFIED = 0 -USA = 1 -EUROPE = 2 -SOUTH_AMERICA = 3 -ASIA_PACIFIC = 4 -RESOURCE_TYPE_UNSPECIFIED = 0 -INSTANCE = 1 
-AWS_ELB_LOAD_BALANCER = 2 - - -_INTERNALCHECKER_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.monitoring.v3.InternalChecker.State", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="UNSPECIFIED", index=0, number=0, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="CREATING", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="RUNNING", index=2, number=2, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=343, - serialized_end=394, -) -_sym_db.RegisterEnumDescriptor(_INTERNALCHECKER_STATE) - -_UPTIMECHECKCONFIG_HTTPCHECK_REQUESTMETHOD = _descriptor.EnumDescriptor( - name="RequestMethod", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="METHOD_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="GET", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="POST", index=2, number=2, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1743, - serialized_end=1801, -) -_sym_db.RegisterEnumDescriptor(_UPTIMECHECKCONFIG_HTTPCHECK_REQUESTMETHOD) - -_UPTIMECHECKCONFIG_HTTPCHECK_CONTENTTYPE = _descriptor.EnumDescriptor( - name="ContentType", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="URL_ENCODED", index=1, number=1, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - 
serialized_start=1803, - serialized_end=1855, -) -_sym_db.RegisterEnumDescriptor(_UPTIMECHECKCONFIG_HTTPCHECK_CONTENTTYPE) - -_UPTIMECHECKCONFIG_CONTENTMATCHER_CONTENTMATCHEROPTION = _descriptor.EnumDescriptor( - name="ContentMatcherOption", - full_name="google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="CONTENT_MATCHER_OPTION_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="CONTAINS_STRING", - index=1, - number=1, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="NOT_CONTAINS_STRING", - index=2, - number=2, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="MATCHES_REGEX", index=3, number=3, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="NOT_MATCHES_REGEX", - index=4, - number=4, - serialized_options=None, - type=None, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2014, - serialized_end=2164, -) -_sym_db.RegisterEnumDescriptor(_UPTIMECHECKCONFIG_CONTENTMATCHER_CONTENTMATCHEROPTION) - - -_INTERNALCHECKER = _descriptor.Descriptor( - name="InternalChecker", - full_name="google.monitoring.v3.InternalChecker", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.InternalChecker.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.monitoring.v3.InternalChecker.display_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - 
has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="network", - full_name="google.monitoring.v3.InternalChecker.network", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="gcp_zone", - full_name="google.monitoring.v3.InternalChecker.gcp_zone", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="peer_project_id", - full_name="google.monitoring.v3.InternalChecker.peer_project_id", - index=4, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.monitoring.v3.InternalChecker.state", - index=5, - number=7, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_INTERNALCHECKER_STATE], - serialized_options=b"\030\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=168, 
- serialized_end=398, -) - - -_UPTIMECHECKCONFIG_RESOURCEGROUP = _descriptor.Descriptor( - name="ResourceGroup", - full_name="google.monitoring.v3.UptimeCheckConfig.ResourceGroup", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="group_id", - full_name="google.monitoring.v3.UptimeCheckConfig.ResourceGroup.group_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="resource_type", - full_name="google.monitoring.v3.UptimeCheckConfig.ResourceGroup.resource_type", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1075, - serialized_end=1172, -) - -_UPTIMECHECKCONFIG_HTTPCHECK_BASICAUTHENTICATION = _descriptor.Descriptor( - name="BasicAuthentication", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="username", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication.username", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - 
name="password", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication.password", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1636, - serialized_end=1693, -) - -_UPTIMECHECKCONFIG_HTTPCHECK_HEADERSENTRY = _descriptor.Descriptor( - name="HeadersEntry", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1695, - serialized_end=1741, -) - -_UPTIMECHECKCONFIG_HTTPCHECK = _descriptor.Descriptor( - name="HttpCheck", - 
full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="request_method", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.request_method", - index=0, - number=8, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="use_ssl", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.use_ssl", - index=1, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="path", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.path", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="port", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.port", - index=3, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="auth_info", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.auth_info", - index=4, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="mask_headers", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.mask_headers", - index=5, - number=5, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="headers", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.headers", - index=6, - number=6, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="content_type", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.content_type", - index=7, - number=9, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="validate_ssl", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.validate_ssl", - index=8, - number=7, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="body", - full_name="google.monitoring.v3.UptimeCheckConfig.HttpCheck.body", - index=9, - number=10, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[ - _UPTIMECHECKCONFIG_HTTPCHECK_BASICAUTHENTICATION, - _UPTIMECHECKCONFIG_HTTPCHECK_HEADERSENTRY, - ], - enum_types=[ - _UPTIMECHECKCONFIG_HTTPCHECK_REQUESTMETHOD, - _UPTIMECHECKCONFIG_HTTPCHECK_CONTENTTYPE, - ], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1175, - serialized_end=1855, -) - -_UPTIMECHECKCONFIG_TCPCHECK = _descriptor.Descriptor( - name="TcpCheck", - full_name="google.monitoring.v3.UptimeCheckConfig.TcpCheck", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="port", - full_name="google.monitoring.v3.UptimeCheckConfig.TcpCheck.port", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1857, - serialized_end=1881, -) - -_UPTIMECHECKCONFIG_CONTENTMATCHER = _descriptor.Descriptor( - name="ContentMatcher", - full_name="google.monitoring.v3.UptimeCheckConfig.ContentMatcher", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="content", - full_name="google.monitoring.v3.UptimeCheckConfig.ContentMatcher.content", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="matcher", - 
full_name="google.monitoring.v3.UptimeCheckConfig.ContentMatcher.matcher", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_UPTIMECHECKCONFIG_CONTENTMATCHER_CONTENTMATCHEROPTION], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1884, - serialized_end=2164, -) - -_UPTIMECHECKCONFIG = _descriptor.Descriptor( - name="UptimeCheckConfig", - full_name="google.monitoring.v3.UptimeCheckConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.UptimeCheckConfig.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.monitoring.v3.UptimeCheckConfig.display_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="monitored_resource", - full_name="google.monitoring.v3.UptimeCheckConfig.monitored_resource", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
), - _descriptor.FieldDescriptor( - name="resource_group", - full_name="google.monitoring.v3.UptimeCheckConfig.resource_group", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="http_check", - full_name="google.monitoring.v3.UptimeCheckConfig.http_check", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="tcp_check", - full_name="google.monitoring.v3.UptimeCheckConfig.tcp_check", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="period", - full_name="google.monitoring.v3.UptimeCheckConfig.period", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="timeout", - full_name="google.monitoring.v3.UptimeCheckConfig.timeout", - index=7, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="content_matchers", - 
full_name="google.monitoring.v3.UptimeCheckConfig.content_matchers", - index=8, - number=9, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="selected_regions", - full_name="google.monitoring.v3.UptimeCheckConfig.selected_regions", - index=9, - number=10, - type=14, - cpp_type=8, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_internal", - full_name="google.monitoring.v3.UptimeCheckConfig.is_internal", - index=10, - number=15, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\030\001", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="internal_checkers", - full_name="google.monitoring.v3.UptimeCheckConfig.internal_checkers", - index=11, - number=14, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\030\001", - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[ - _UPTIMECHECKCONFIG_RESOURCEGROUP, - _UPTIMECHECKCONFIG_HTTPCHECK, - _UPTIMECHECKCONFIG_TCPCHECK, - _UPTIMECHECKCONFIG_CONTENTMATCHER, - ], - enum_types=[], - 
serialized_options=b"\352A\357\001\n+monitoring.googleapis.com/UptimeCheckConfig\022;projects/{project}/uptimeCheckConfigs/{uptime_check_config}\022Eorganizations/{organization}/uptimeCheckConfigs/{uptime_check_config}\0229folders/{folder}/uptimeCheckConfigs/{uptime_check_config}\022\001*", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="resource", - full_name="google.monitoring.v3.UptimeCheckConfig.resource", - index=0, - containing_type=None, - fields=[], - ), - _descriptor.OneofDescriptor( - name="check_request_type", - full_name="google.monitoring.v3.UptimeCheckConfig.check_request_type", - index=1, - containing_type=None, - fields=[], - ), - ], - serialized_start=401, - serialized_end=2444, -) - - -_UPTIMECHECKIP = _descriptor.Descriptor( - name="UptimeCheckIp", - full_name="google.monitoring.v3.UptimeCheckIp", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="region", - full_name="google.monitoring.v3.UptimeCheckIp.region", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="location", - full_name="google.monitoring.v3.UptimeCheckIp.location", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="ip_address", - full_name="google.monitoring.v3.UptimeCheckIp.ip_address", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - 
enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2446, - serialized_end=2556, -) - -_INTERNALCHECKER.fields_by_name["state"].enum_type = _INTERNALCHECKER_STATE -_INTERNALCHECKER_STATE.containing_type = _INTERNALCHECKER -_UPTIMECHECKCONFIG_RESOURCEGROUP.fields_by_name[ - "resource_type" -].enum_type = _GROUPRESOURCETYPE -_UPTIMECHECKCONFIG_RESOURCEGROUP.containing_type = _UPTIMECHECKCONFIG -_UPTIMECHECKCONFIG_HTTPCHECK_BASICAUTHENTICATION.containing_type = ( - _UPTIMECHECKCONFIG_HTTPCHECK -) -_UPTIMECHECKCONFIG_HTTPCHECK_HEADERSENTRY.containing_type = _UPTIMECHECKCONFIG_HTTPCHECK -_UPTIMECHECKCONFIG_HTTPCHECK.fields_by_name[ - "request_method" -].enum_type = _UPTIMECHECKCONFIG_HTTPCHECK_REQUESTMETHOD -_UPTIMECHECKCONFIG_HTTPCHECK.fields_by_name[ - "auth_info" -].message_type = _UPTIMECHECKCONFIG_HTTPCHECK_BASICAUTHENTICATION -_UPTIMECHECKCONFIG_HTTPCHECK.fields_by_name[ - "headers" -].message_type = _UPTIMECHECKCONFIG_HTTPCHECK_HEADERSENTRY -_UPTIMECHECKCONFIG_HTTPCHECK.fields_by_name[ - "content_type" -].enum_type = _UPTIMECHECKCONFIG_HTTPCHECK_CONTENTTYPE -_UPTIMECHECKCONFIG_HTTPCHECK.containing_type = _UPTIMECHECKCONFIG -_UPTIMECHECKCONFIG_HTTPCHECK_REQUESTMETHOD.containing_type = ( - _UPTIMECHECKCONFIG_HTTPCHECK -) -_UPTIMECHECKCONFIG_HTTPCHECK_CONTENTTYPE.containing_type = _UPTIMECHECKCONFIG_HTTPCHECK -_UPTIMECHECKCONFIG_TCPCHECK.containing_type = _UPTIMECHECKCONFIG -_UPTIMECHECKCONFIG_CONTENTMATCHER.fields_by_name[ - "matcher" -].enum_type = _UPTIMECHECKCONFIG_CONTENTMATCHER_CONTENTMATCHEROPTION -_UPTIMECHECKCONFIG_CONTENTMATCHER.containing_type = _UPTIMECHECKCONFIG -_UPTIMECHECKCONFIG_CONTENTMATCHER_CONTENTMATCHEROPTION.containing_type = ( - _UPTIMECHECKCONFIG_CONTENTMATCHER -) 
-_UPTIMECHECKCONFIG.fields_by_name[ - "monitored_resource" -].message_type = google_dot_api_dot_monitored__resource__pb2._MONITOREDRESOURCE -_UPTIMECHECKCONFIG.fields_by_name[ - "resource_group" -].message_type = _UPTIMECHECKCONFIG_RESOURCEGROUP -_UPTIMECHECKCONFIG.fields_by_name[ - "http_check" -].message_type = _UPTIMECHECKCONFIG_HTTPCHECK -_UPTIMECHECKCONFIG.fields_by_name[ - "tcp_check" -].message_type = _UPTIMECHECKCONFIG_TCPCHECK -_UPTIMECHECKCONFIG.fields_by_name[ - "period" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_UPTIMECHECKCONFIG.fields_by_name[ - "timeout" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_UPTIMECHECKCONFIG.fields_by_name[ - "content_matchers" -].message_type = _UPTIMECHECKCONFIG_CONTENTMATCHER -_UPTIMECHECKCONFIG.fields_by_name["selected_regions"].enum_type = _UPTIMECHECKREGION -_UPTIMECHECKCONFIG.fields_by_name["internal_checkers"].message_type = _INTERNALCHECKER -_UPTIMECHECKCONFIG.oneofs_by_name["resource"].fields.append( - _UPTIMECHECKCONFIG.fields_by_name["monitored_resource"] -) -_UPTIMECHECKCONFIG.fields_by_name[ - "monitored_resource" -].containing_oneof = _UPTIMECHECKCONFIG.oneofs_by_name["resource"] -_UPTIMECHECKCONFIG.oneofs_by_name["resource"].fields.append( - _UPTIMECHECKCONFIG.fields_by_name["resource_group"] -) -_UPTIMECHECKCONFIG.fields_by_name[ - "resource_group" -].containing_oneof = _UPTIMECHECKCONFIG.oneofs_by_name["resource"] -_UPTIMECHECKCONFIG.oneofs_by_name["check_request_type"].fields.append( - _UPTIMECHECKCONFIG.fields_by_name["http_check"] -) -_UPTIMECHECKCONFIG.fields_by_name[ - "http_check" -].containing_oneof = _UPTIMECHECKCONFIG.oneofs_by_name["check_request_type"] -_UPTIMECHECKCONFIG.oneofs_by_name["check_request_type"].fields.append( - _UPTIMECHECKCONFIG.fields_by_name["tcp_check"] -) -_UPTIMECHECKCONFIG.fields_by_name[ - "tcp_check" -].containing_oneof = _UPTIMECHECKCONFIG.oneofs_by_name["check_request_type"] -_UPTIMECHECKIP.fields_by_name["region"].enum_type 
= _UPTIMECHECKREGION -DESCRIPTOR.message_types_by_name["InternalChecker"] = _INTERNALCHECKER -DESCRIPTOR.message_types_by_name["UptimeCheckConfig"] = _UPTIMECHECKCONFIG -DESCRIPTOR.message_types_by_name["UptimeCheckIp"] = _UPTIMECHECKIP -DESCRIPTOR.enum_types_by_name["UptimeCheckRegion"] = _UPTIMECHECKREGION -DESCRIPTOR.enum_types_by_name["GroupResourceType"] = _GROUPRESOURCETYPE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -InternalChecker = _reflection.GeneratedProtocolMessageType( - "InternalChecker", - (_message.Message,), - { - "DESCRIPTOR": _INTERNALCHECKER, - "__module__": "google.cloud.monitoring_v3.proto.uptime_pb2", - "__doc__": """An internal checker allows Uptime checks to run on - private/internal GCP resources. - - - Attributes: - name: - A unique resource name for this InternalChecker. The format - is: :: projects/[PROJECT_ID_OR_NUMBER]/internalCheckers/[ - INTERNAL_CHECKER_ID] ``[PROJECT_ID_OR_NUMBER]`` is the - Stackdriver Workspace project for the Uptime check config - associated with the internal checker. - display_name: - The checker’s human-readable name. The display name should be - unique within a Stackdriver Workspace in order to make it - easier to identify; however, uniqueness is not enforced. - network: - The `GCP VPC network - `__ where the internal - resource lives (ex: “default”). - gcp_zone: - The GCP zone the Uptime check should egress from. Only - respected for internal Uptime checks, where internal_network - is specified. - peer_project_id: - The GCP project ID where the internal checker lives. Not - necessary the same as the Workspace project. - state: - The current operational state of the internal checker. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.InternalChecker) - }, -) -_sym_db.RegisterMessage(InternalChecker) - -UptimeCheckConfig = _reflection.GeneratedProtocolMessageType( - "UptimeCheckConfig", - (_message.Message,), - { - "ResourceGroup": _reflection.GeneratedProtocolMessageType( - "ResourceGroup", - (_message.Message,), - { - "DESCRIPTOR": _UPTIMECHECKCONFIG_RESOURCEGROUP, - "__module__": "google.cloud.monitoring_v3.proto.uptime_pb2", - "__doc__": """The resource submessage for group checks. It can be used - instead of a monitored resource, when multiple resources are being - monitored. - - - Attributes: - group_id: - The group of resources being monitored. Should be only the - ``[GROUP_ID]``, and not the full-path - ``projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]``. - resource_type: - The resource type of the group members. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.UptimeCheckConfig.ResourceGroup) - }, - ), - "HttpCheck": _reflection.GeneratedProtocolMessageType( - "HttpCheck", - (_message.Message,), - { - "BasicAuthentication": _reflection.GeneratedProtocolMessageType( - "BasicAuthentication", - (_message.Message,), - { - "DESCRIPTOR": _UPTIMECHECKCONFIG_HTTPCHECK_BASICAUTHENTICATION, - "__module__": "google.cloud.monitoring_v3.proto.uptime_pb2", - "__doc__": """The authentication parameters to provide to the specified - resource or URL that requires a username and password. Currently, only - `Basic HTTP authentication `__ is - supported in Uptime checks. - - - Attributes: - username: - The username to use when authenticating with the HTTP server. - password: - The password to use when authenticating with the HTTP server. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication) - }, - ), - "HeadersEntry": _reflection.GeneratedProtocolMessageType( - "HeadersEntry", - (_message.Message,), - { - "DESCRIPTOR": _UPTIMECHECKCONFIG_HTTPCHECK_HEADERSENTRY, - "__module__": "google.cloud.monitoring_v3.proto.uptime_pb2" - # @@protoc_insertion_point(class_scope:google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry) - }, - ), - "DESCRIPTOR": _UPTIMECHECKCONFIG_HTTPCHECK, - "__module__": "google.cloud.monitoring_v3.proto.uptime_pb2", - "__doc__": """Information involved in an HTTP/HTTPS Uptime check - request. - - - Attributes: - request_method: - The HTTP request method to use for the check. - use_ssl: - If ``true``, use HTTPS instead of HTTP to run the check. - path: - Optional (defaults to “/”). The path to the page against which - to run the check. Will be combined with the ``host`` - (specified within the ``monitored_resource``) and ``port`` to - construct the full URL. If the provided path does not begin - with “/”, a “/” will be prepended automatically. - port: - Optional (defaults to 80 when ``use_ssl`` is ``false``, and - 443 when ``use_ssl`` is ``true``). The TCP port on the HTTP - server against which to run the check. Will be combined with - host (specified within the ``monitored_resource``) and - ``path`` to construct the full URL. - auth_info: - The authentication information. Optional when creating an HTTP - check; defaults to empty. - mask_headers: - Boolean specifiying whether to encrypt the header information. - Encryption should be specified for any headers related to - authentication that you do not wish to be seen when retrieving - the configuration. The server will be responsible for - encrypting the headers. On Get/List calls, if ``mask_headers`` - is set to ``true`` then the headers will be obscured with - ``******.`` - headers: - The list of headers to send as part of the Uptime check - request. 
If two headers have the same key and different - values, they should be entered as a single header, with the - value being a comma-separated list of all the desired values - as described at - https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). - Entering two separate headers with the same key in a Create - call will cause the first to be overwritten by the second. The - maximum number of headers allowed is 100. - content_type: - The content type to use for the check. - validate_ssl: - Boolean specifying whether to include SSL certificate - validation as a part of the Uptime check. Only applies to - checks where ``monitored_resource`` is set to ``uptime_url``. - If ``use_ssl`` is ``false``, setting ``validate_ssl`` to - ``true`` has no effect. - body: - The request body associated with the HTTP request. If - ``content_type`` is ``URL_ENCODED``, the body passed in must - be URL-encoded. Users can provide a ``Content-Length`` header - via the ``headers`` field or the API will do so. The maximum - byte size is 1 megabyte. Note: As with all ``bytes`` fields - JSON representations are base64 encoded. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.UptimeCheckConfig.HttpCheck) - }, - ), - "TcpCheck": _reflection.GeneratedProtocolMessageType( - "TcpCheck", - (_message.Message,), - { - "DESCRIPTOR": _UPTIMECHECKCONFIG_TCPCHECK, - "__module__": "google.cloud.monitoring_v3.proto.uptime_pb2", - "__doc__": """Information required for a TCP Uptime check request. - - - Attributes: - port: - The TCP port on the server against which to run the check. - Will be combined with host (specified within the - ``monitored_resource``) to construct the full URL. Required. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.UptimeCheckConfig.TcpCheck) - }, - ), - "ContentMatcher": _reflection.GeneratedProtocolMessageType( - "ContentMatcher", - (_message.Message,), - { - "DESCRIPTOR": _UPTIMECHECKCONFIG_CONTENTMATCHER, - "__module__": "google.cloud.monitoring_v3.proto.uptime_pb2", - "__doc__": """Optional. Used to perform content matching. This allows - matching based on substrings and regular expressions, together with - their negations. Only the first 4 MB of an HTTP or HTTPS check’s - response (and the first 1 MB of a TCP check’s response) are examined for - purposes of content matching. - - - Attributes: - content: - String or regex content to match. Maximum 1024 bytes. An empty - ``content`` string indicates no content matching is to be - performed. - matcher: - The type of content matcher that will be applied to the server - output, compared to the ``content`` string when the check is - run. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.UptimeCheckConfig.ContentMatcher) - }, - ), - "DESCRIPTOR": _UPTIMECHECKCONFIG, - "__module__": "google.cloud.monitoring_v3.proto.uptime_pb2", - "__doc__": """This message configures which resources and services to - monitor for availability. - - - Attributes: - name: - A unique resource name for this Uptime check configuration. - The format is: :: projects/[PROJECT_ID_OR_NUMBER]/uptime - CheckConfigs/[UPTIME_CHECK_ID] This field should be omitted - when creating the Uptime check configuration; on create, the - resource name is assigned by the server and included in the - response. - display_name: - A human-friendly name for the Uptime check configuration. The - display name should be unique within a Stackdriver Workspace - in order to make it easier to identify; however, uniqueness is - not enforced. Required. - resource: - The resource the check is checking. Required. 
- monitored_resource: - The `monitored resource - `__ - associated with the configuration. The following monitored - resource types are supported for Uptime checks: - ``uptime_url``, ``gce_instance``, ``gae_app``, - ``aws_ec2_instance``, ``aws_elb_load_balancer`` - resource_group: - The group resource associated with the configuration. - check_request_type: - The type of Uptime check request. - http_check: - Contains information needed to make an HTTP or HTTPS check. - tcp_check: - Contains information needed to make a TCP check. - period: - How often, in seconds, the Uptime check is performed. - Currently, the only supported values are ``60s`` (1 minute), - ``300s`` (5 minutes), ``600s`` (10 minutes), and ``900s`` (15 - minutes). Optional, defaults to ``60s``. - timeout: - The maximum amount of time to wait for the request to complete - (must be between 1 and 60 seconds). Required. - content_matchers: - The content that is expected to appear in the data returned by - the target server against which the check is run. Currently, - only the first entry in the ``content_matchers`` list is - supported, and additional entries will be ignored. This field - is optional and should only be specified if a content match is - required as part of the/ Uptime check. - selected_regions: - The list of regions from which the check will be run. Some - regions contain one location, and others contain more than - one. If this field is specified, enough regions must be - provided to include a minimum of 3 locations. Not specifying - this field will result in Uptime checks running from all - available regions. - is_internal: - If this is ``true``, then checks are made only from the - ‘internal_checkers’. If it is ``false``, then checks are made - only from the ‘selected_regions’. It is an error to provide - ‘selected_regions’ when is_internal is ``true``, or to provide - ‘internal_checkers’ when is_internal is ``false``. 
- internal_checkers: - The internal checkers that this check will egress from. If - ``is_internal`` is ``true`` and this list is empty, the check - will egress from all the InternalCheckers configured for the - project that owns this ``UptimeCheckConfig``. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.UptimeCheckConfig) - }, -) -_sym_db.RegisterMessage(UptimeCheckConfig) -_sym_db.RegisterMessage(UptimeCheckConfig.ResourceGroup) -_sym_db.RegisterMessage(UptimeCheckConfig.HttpCheck) -_sym_db.RegisterMessage(UptimeCheckConfig.HttpCheck.BasicAuthentication) -_sym_db.RegisterMessage(UptimeCheckConfig.HttpCheck.HeadersEntry) -_sym_db.RegisterMessage(UptimeCheckConfig.TcpCheck) -_sym_db.RegisterMessage(UptimeCheckConfig.ContentMatcher) - -UptimeCheckIp = _reflection.GeneratedProtocolMessageType( - "UptimeCheckIp", - (_message.Message,), - { - "DESCRIPTOR": _UPTIMECHECKIP, - "__module__": "google.cloud.monitoring_v3.proto.uptime_pb2", - "__doc__": """Contains the region, location, and list of IP addresses - where checkers in the location run from. - - - Attributes: - region: - A broad region category in which the IP address is located. - location: - A more specific location within the region that typically - encodes a particular city/town/metro (and its containing - state/province or country) within the broader umbrella region - category. - ip_address: - The IP address from which the Uptime check originates. This is - a fully specified IP address (not an IP address range). Most - IP addresses, as of this publication, are in IPv4 format; - however, one should not rely on the IP addresses being in IPv4 - format indefinitely, and should support interpreting this - field in either IPv4 or IPv6 format. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.UptimeCheckIp) - }, -) -_sym_db.RegisterMessage(UptimeCheckIp) - - -DESCRIPTOR._options = None -_INTERNALCHECKER._options = None -_UPTIMECHECKCONFIG_HTTPCHECK_HEADERSENTRY._options = None -_UPTIMECHECKCONFIG.fields_by_name["is_internal"]._options = None -_UPTIMECHECKCONFIG.fields_by_name["internal_checkers"]._options = None -_UPTIMECHECKCONFIG._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/monitoring_v3/proto/uptime_pb2_grpc.py b/google/cloud/monitoring_v3/proto/uptime_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/monitoring_v3/proto/uptime_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/monitoring_v3/proto/uptime_service_pb2.py b/google/cloud/monitoring_v3/proto/uptime_service_pb2.py deleted file mode 100644 index ec031050..00000000 --- a/google/cloud/monitoring_v3/proto/uptime_service_pb2.py +++ /dev/null @@ -1,833 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/monitoring_v3/proto/uptime_service.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.monitoring_v3.proto import ( - uptime_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2, -) -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/monitoring_v3/proto/uptime_service.proto", - package="google.monitoring.v3", - syntax="proto3", - serialized_options=b"\n\030com.google.monitoring.v3B\022UptimeServiceProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3\352\002\035Google::Cloud::Monitoring::V3", - serialized_pb=b'\n5google/cloud/monitoring_v3/proto/uptime_service.proto\x12\x14google.monitoring.v3\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a-google/cloud/monitoring_v3/proto/uptime.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto"\x8b\x01\n\x1dListUptimeCheckConfigsRequest\x12\x43\n\x06parent\x18\x01 
\x01(\tB3\xe0\x41\x02\xfa\x41-\x12+monitoring.googleapis.com/UptimeCheckConfig\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t"\x94\x01\n\x1eListUptimeCheckConfigsResponse\x12\x45\n\x14uptime_check_configs\x18\x01 \x03(\x0b\x32\'.google.monitoring.v3.UptimeCheckConfig\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x12\n\ntotal_size\x18\x03 \x01(\x05"`\n\x1bGetUptimeCheckConfigRequest\x12\x41\n\x04name\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+monitoring.googleapis.com/UptimeCheckConfig"\xb0\x01\n\x1e\x43reateUptimeCheckConfigRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\x12+monitoring.googleapis.com/UptimeCheckConfig\x12I\n\x13uptime_check_config\x18\x02 \x01(\x0b\x32\'.google.monitoring.v3.UptimeCheckConfigB\x03\xe0\x41\x02"\x9c\x01\n\x1eUpdateUptimeCheckConfigRequest\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12I\n\x13uptime_check_config\x18\x03 \x01(\x0b\x32\'.google.monitoring.v3.UptimeCheckConfigB\x03\xe0\x41\x02"c\n\x1e\x44\x65leteUptimeCheckConfigRequest\x12\x41\n\x04name\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+monitoring.googleapis.com/UptimeCheckConfig"B\n\x19ListUptimeCheckIpsRequest\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"t\n\x1aListUptimeCheckIpsResponse\x12=\n\x10uptime_check_ips\x18\x01 \x03(\x0b\x32#.google.monitoring.v3.UptimeCheckIp\x12\x17\n\x0fnext_page_token\x18\x02 
\x01(\t2\xbd\n\n\x12UptimeCheckService\x12\xc0\x01\n\x16ListUptimeCheckConfigs\x12\x33.google.monitoring.v3.ListUptimeCheckConfigsRequest\x1a\x34.google.monitoring.v3.ListUptimeCheckConfigsResponse";\x82\xd3\xe4\x93\x02,\x12*/v3/{parent=projects/*}/uptimeCheckConfigs\xda\x41\x06parent\x12\xad\x01\n\x14GetUptimeCheckConfig\x12\x31.google.monitoring.v3.GetUptimeCheckConfigRequest\x1a\'.google.monitoring.v3.UptimeCheckConfig"9\x82\xd3\xe4\x93\x02,\x12*/v3/{name=projects/*/uptimeCheckConfigs/*}\xda\x41\x04name\x12\xde\x01\n\x17\x43reateUptimeCheckConfig\x12\x34.google.monitoring.v3.CreateUptimeCheckConfigRequest\x1a\'.google.monitoring.v3.UptimeCheckConfig"d\x82\xd3\xe4\x93\x02\x41"*/v3/{parent=projects/*}/uptimeCheckConfigs:\x13uptime_check_config\xda\x41\x1aparent,uptime_check_config\x12\xeb\x01\n\x17UpdateUptimeCheckConfig\x12\x34.google.monitoring.v3.UpdateUptimeCheckConfigRequest\x1a\'.google.monitoring.v3.UptimeCheckConfig"q\x82\xd3\xe4\x93\x02U2>/v3/{uptime_check_config.name=projects/*/uptimeCheckConfigs/*}:\x13uptime_check_config\xda\x41\x13uptime_check_config\x12\xa2\x01\n\x17\x44\x65leteUptimeCheckConfig\x12\x34.google.monitoring.v3.DeleteUptimeCheckConfigRequest\x1a\x16.google.protobuf.Empty"9\x82\xd3\xe4\x93\x02,**/v3/{name=projects/*/uptimeCheckConfigs/*}\xda\x41\x04name\x12\x93\x01\n\x12ListUptimeCheckIps\x12/.google.monitoring.v3.ListUptimeCheckIpsRequest\x1a\x30.google.monitoring.v3.ListUptimeCheckIpsResponse"\x1a\x82\xd3\xe4\x93\x02\x14\x12\x12/v3/uptimeCheckIps\x1a\xa9\x01\xca\x41\x19monitoring.googleapis.com\xd2\x41\x89\x01https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/monitoring,https://www.googleapis.com/auth/monitoring.readB\xca\x01\n\x18\x63om.google.monitoring.v3B\x12UptimeServiceProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3\xea\x02\x1dGoogle::Cloud::Monitoring::V3b\x06proto3', - dependencies=[ - 
google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - ], -) - - -_LISTUPTIMECHECKCONFIGSREQUEST = _descriptor.Descriptor( - name="ListUptimeCheckConfigsRequest", - full_name="google.monitoring.v3.ListUptimeCheckConfigsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.monitoring.v3.ListUptimeCheckConfigsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A-\022+monitoring.googleapis.com/UptimeCheckConfig", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.monitoring.v3.ListUptimeCheckConfigsRequest.page_size", - index=1, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.monitoring.v3.ListUptimeCheckConfigsRequest.page_token", - index=2, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - 
serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=337, - serialized_end=476, -) - - -_LISTUPTIMECHECKCONFIGSRESPONSE = _descriptor.Descriptor( - name="ListUptimeCheckConfigsResponse", - full_name="google.monitoring.v3.ListUptimeCheckConfigsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="uptime_check_configs", - full_name="google.monitoring.v3.ListUptimeCheckConfigsResponse.uptime_check_configs", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.monitoring.v3.ListUptimeCheckConfigsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="total_size", - full_name="google.monitoring.v3.ListUptimeCheckConfigsResponse.total_size", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=479, - serialized_end=627, -) - - -_GETUPTIMECHECKCONFIGREQUEST = _descriptor.Descriptor( - name="GetUptimeCheckConfigRequest", - 
full_name="google.monitoring.v3.GetUptimeCheckConfigRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.GetUptimeCheckConfigRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A-\n+monitoring.googleapis.com/UptimeCheckConfig", - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=629, - serialized_end=725, -) - - -_CREATEUPTIMECHECKCONFIGREQUEST = _descriptor.Descriptor( - name="CreateUptimeCheckConfigRequest", - full_name="google.monitoring.v3.CreateUptimeCheckConfigRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.monitoring.v3.CreateUptimeCheckConfigRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A-\022+monitoring.googleapis.com/UptimeCheckConfig", - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="uptime_check_config", - full_name="google.monitoring.v3.CreateUptimeCheckConfigRequest.uptime_check_config", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - 
serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=728, - serialized_end=904, -) - - -_UPDATEUPTIMECHECKCONFIGREQUEST = _descriptor.Descriptor( - name="UpdateUptimeCheckConfigRequest", - full_name="google.monitoring.v3.UpdateUptimeCheckConfigRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.monitoring.v3.UpdateUptimeCheckConfigRequest.update_mask", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="uptime_check_config", - full_name="google.monitoring.v3.UpdateUptimeCheckConfigRequest.uptime_check_config", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=907, - serialized_end=1063, -) - - -_DELETEUPTIMECHECKCONFIGREQUEST = _descriptor.Descriptor( - name="DeleteUptimeCheckConfigRequest", - full_name="google.monitoring.v3.DeleteUptimeCheckConfigRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.monitoring.v3.DeleteUptimeCheckConfigRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, 
- extension_scope=None, - serialized_options=b"\340A\002\372A-\n+monitoring.googleapis.com/UptimeCheckConfig", - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1065, - serialized_end=1164, -) - - -_LISTUPTIMECHECKIPSREQUEST = _descriptor.Descriptor( - name="ListUptimeCheckIpsRequest", - full_name="google.monitoring.v3.ListUptimeCheckIpsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.monitoring.v3.ListUptimeCheckIpsRequest.page_size", - index=0, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.monitoring.v3.ListUptimeCheckIpsRequest.page_token", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1166, - serialized_end=1232, -) - - -_LISTUPTIMECHECKIPSRESPONSE = _descriptor.Descriptor( - name="ListUptimeCheckIpsResponse", - full_name="google.monitoring.v3.ListUptimeCheckIpsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="uptime_check_ips", - full_name="google.monitoring.v3.ListUptimeCheckIpsResponse.uptime_check_ips", - index=0, - number=1, - type=11, - cpp_type=10, 
- label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.monitoring.v3.ListUptimeCheckIpsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1234, - serialized_end=1350, -) - -_LISTUPTIMECHECKCONFIGSRESPONSE.fields_by_name[ - "uptime_check_configs" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2._UPTIMECHECKCONFIG -) -_CREATEUPTIMECHECKCONFIGREQUEST.fields_by_name[ - "uptime_check_config" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2._UPTIMECHECKCONFIG -) -_UPDATEUPTIMECHECKCONFIGREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_UPDATEUPTIMECHECKCONFIGREQUEST.fields_by_name[ - "uptime_check_config" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2._UPTIMECHECKCONFIG -) -_LISTUPTIMECHECKIPSRESPONSE.fields_by_name[ - "uptime_check_ips" -].message_type = ( - google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2._UPTIMECHECKIP -) -DESCRIPTOR.message_types_by_name[ - "ListUptimeCheckConfigsRequest" -] = _LISTUPTIMECHECKCONFIGSREQUEST -DESCRIPTOR.message_types_by_name[ - "ListUptimeCheckConfigsResponse" -] = _LISTUPTIMECHECKCONFIGSRESPONSE -DESCRIPTOR.message_types_by_name[ - "GetUptimeCheckConfigRequest" -] = 
_GETUPTIMECHECKCONFIGREQUEST -DESCRIPTOR.message_types_by_name[ - "CreateUptimeCheckConfigRequest" -] = _CREATEUPTIMECHECKCONFIGREQUEST -DESCRIPTOR.message_types_by_name[ - "UpdateUptimeCheckConfigRequest" -] = _UPDATEUPTIMECHECKCONFIGREQUEST -DESCRIPTOR.message_types_by_name[ - "DeleteUptimeCheckConfigRequest" -] = _DELETEUPTIMECHECKCONFIGREQUEST -DESCRIPTOR.message_types_by_name[ - "ListUptimeCheckIpsRequest" -] = _LISTUPTIMECHECKIPSREQUEST -DESCRIPTOR.message_types_by_name[ - "ListUptimeCheckIpsResponse" -] = _LISTUPTIMECHECKIPSRESPONSE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ListUptimeCheckConfigsRequest = _reflection.GeneratedProtocolMessageType( - "ListUptimeCheckConfigsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTUPTIMECHECKCONFIGSREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.uptime_service_pb2", - "__doc__": """The protocol for the ``ListUptimeCheckConfigs`` request. - - - Attributes: - parent: - Required. The project whose Uptime check configurations are - listed. The format is: :: projects/[PROJECT_ID_OR_NUMBER] - page_size: - The maximum number of results to return in a single response. - The server may further constrain the maximum number of results - returned in a single page. If the page_size is <=0, the server - will decide the number of results to be returned. - page_token: - If this field is not empty then it must contain the - ``nextPageToken`` value returned by a previous call to this - method. Using this field causes the method to return more - results from the previous method call. 
- """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListUptimeCheckConfigsRequest) - }, -) -_sym_db.RegisterMessage(ListUptimeCheckConfigsRequest) - -ListUptimeCheckConfigsResponse = _reflection.GeneratedProtocolMessageType( - "ListUptimeCheckConfigsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTUPTIMECHECKCONFIGSRESPONSE, - "__module__": "google.cloud.monitoring_v3.proto.uptime_service_pb2", - "__doc__": """The protocol for the ``ListUptimeCheckConfigs`` response. - - - Attributes: - uptime_check_configs: - The returned Uptime check configurations. - next_page_token: - This field represents the pagination token to retrieve the - next page of results. If the value is empty, it means no - further results for the request. To retrieve the next page of - results, the value of the next_page_token is passed to the - subsequent List method call (in the request message’s - page_token field). - total_size: - The total number of Uptime check configurations for the - project, irrespective of any pagination. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListUptimeCheckConfigsResponse) - }, -) -_sym_db.RegisterMessage(ListUptimeCheckConfigsResponse) - -GetUptimeCheckConfigRequest = _reflection.GeneratedProtocolMessageType( - "GetUptimeCheckConfigRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETUPTIMECHECKCONFIGREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.uptime_service_pb2", - "__doc__": """The protocol for the ``GetUptimeCheckConfig`` request. - - - Attributes: - name: - Required. The Uptime check configuration to retrieve. 
The - format is: :: projects/[PROJECT_ID_OR_NUMBER]/uptimeCheck - Configs/[UPTIME_CHECK_ID] - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.GetUptimeCheckConfigRequest) - }, -) -_sym_db.RegisterMessage(GetUptimeCheckConfigRequest) - -CreateUptimeCheckConfigRequest = _reflection.GeneratedProtocolMessageType( - "CreateUptimeCheckConfigRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEUPTIMECHECKCONFIGREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.uptime_service_pb2", - "__doc__": """The protocol for the ``CreateUptimeCheckConfig`` request. - - - Attributes: - parent: - Required. The project in which to create the Uptime check. The - format is: :: projects/[PROJECT_ID_OR_NUMBER] - uptime_check_config: - Required. The new Uptime check configuration. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.CreateUptimeCheckConfigRequest) - }, -) -_sym_db.RegisterMessage(CreateUptimeCheckConfigRequest) - -UpdateUptimeCheckConfigRequest = _reflection.GeneratedProtocolMessageType( - "UpdateUptimeCheckConfigRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEUPTIMECHECKCONFIGREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.uptime_service_pb2", - "__doc__": """The protocol for the ``UpdateUptimeCheckConfig`` request. - - - Attributes: - update_mask: - Optional. If present, only the listed fields in the current - Uptime check configuration are updated with values from the - new configuration. If this field is empty, then the current - configuration is completely replaced with the new - configuration. - uptime_check_config: - Required. If an ``updateMask`` has been specified, this field - gives the values for the set of fields mentioned in the - ``updateMask``. If an ``updateMask`` has not been given, this - Uptime check configuration replaces the current configuration. 
- If a field is mentioned in ``updateMask`` but the corresonding - field is omitted in this partial Uptime check configuration, - it has the effect of deleting/clearing the field from the - configuration on the server. The following fields can be - updated: ``display_name``, ``http_check``, ``tcp_check``, - ``timeout``, ``content_matchers``, and ``selected_regions``. - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.UpdateUptimeCheckConfigRequest) - }, -) -_sym_db.RegisterMessage(UpdateUptimeCheckConfigRequest) - -DeleteUptimeCheckConfigRequest = _reflection.GeneratedProtocolMessageType( - "DeleteUptimeCheckConfigRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEUPTIMECHECKCONFIGREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.uptime_service_pb2", - "__doc__": """The protocol for the ``DeleteUptimeCheckConfig`` request. - - - Attributes: - name: - Required. The Uptime check configuration to delete. The format - is: :: projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs - /[UPTIME_CHECK_ID] - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.DeleteUptimeCheckConfigRequest) - }, -) -_sym_db.RegisterMessage(DeleteUptimeCheckConfigRequest) - -ListUptimeCheckIpsRequest = _reflection.GeneratedProtocolMessageType( - "ListUptimeCheckIpsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTUPTIMECHECKIPSREQUEST, - "__module__": "google.cloud.monitoring_v3.proto.uptime_service_pb2", - "__doc__": """The protocol for the ``ListUptimeCheckIps`` request. - - - Attributes: - page_size: - The maximum number of results to return in a single response. - The server may further constrain the maximum number of results - returned in a single page. If the page_size is <=0, the server - will decide the number of results to be returned. NOTE: this - field is not yet implemented - page_token: - If this field is not empty then it must contain the - ``nextPageToken`` value returned by a previous call to this - method. 
Using this field causes the method to return more - results from the previous method call. NOTE: this field is not - yet implemented - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListUptimeCheckIpsRequest) - }, -) -_sym_db.RegisterMessage(ListUptimeCheckIpsRequest) - -ListUptimeCheckIpsResponse = _reflection.GeneratedProtocolMessageType( - "ListUptimeCheckIpsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTUPTIMECHECKIPSRESPONSE, - "__module__": "google.cloud.monitoring_v3.proto.uptime_service_pb2", - "__doc__": """The protocol for the ``ListUptimeCheckIps`` response. - - - Attributes: - uptime_check_ips: - The returned list of IP addresses (including region and - location) that the checkers run from. - next_page_token: - This field represents the pagination token to retrieve the - next page of results. If the value is empty, it means no - further results for the request. To retrieve the next page of - results, the value of the next_page_token is passed to the - subsequent List method call (in the request message’s - page_token field). 
NOTE: this field is not yet implemented - """, - # @@protoc_insertion_point(class_scope:google.monitoring.v3.ListUptimeCheckIpsResponse) - }, -) -_sym_db.RegisterMessage(ListUptimeCheckIpsResponse) - - -DESCRIPTOR._options = None -_LISTUPTIMECHECKCONFIGSREQUEST.fields_by_name["parent"]._options = None -_GETUPTIMECHECKCONFIGREQUEST.fields_by_name["name"]._options = None -_CREATEUPTIMECHECKCONFIGREQUEST.fields_by_name["parent"]._options = None -_CREATEUPTIMECHECKCONFIGREQUEST.fields_by_name["uptime_check_config"]._options = None -_UPDATEUPTIMECHECKCONFIGREQUEST.fields_by_name["uptime_check_config"]._options = None -_DELETEUPTIMECHECKCONFIGREQUEST.fields_by_name["name"]._options = None - -_UPTIMECHECKSERVICE = _descriptor.ServiceDescriptor( - name="UptimeCheckService", - full_name="google.monitoring.v3.UptimeCheckService", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\031monitoring.googleapis.com\322A\211\001https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/monitoring,https://www.googleapis.com/auth/monitoring.read", - serialized_start=1353, - serialized_end=2694, - methods=[ - _descriptor.MethodDescriptor( - name="ListUptimeCheckConfigs", - full_name="google.monitoring.v3.UptimeCheckService.ListUptimeCheckConfigs", - index=0, - containing_service=None, - input_type=_LISTUPTIMECHECKCONFIGSREQUEST, - output_type=_LISTUPTIMECHECKCONFIGSRESPONSE, - serialized_options=b"\202\323\344\223\002,\022*/v3/{parent=projects/*}/uptimeCheckConfigs\332A\006parent", - ), - _descriptor.MethodDescriptor( - name="GetUptimeCheckConfig", - full_name="google.monitoring.v3.UptimeCheckService.GetUptimeCheckConfig", - index=1, - containing_service=None, - input_type=_GETUPTIMECHECKCONFIGREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2._UPTIMECHECKCONFIG, - serialized_options=b"\202\323\344\223\002,\022*/v3/{name=projects/*/uptimeCheckConfigs/*}\332A\004name", - ), - _descriptor.MethodDescriptor( - 
name="CreateUptimeCheckConfig", - full_name="google.monitoring.v3.UptimeCheckService.CreateUptimeCheckConfig", - index=2, - containing_service=None, - input_type=_CREATEUPTIMECHECKCONFIGREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2._UPTIMECHECKCONFIG, - serialized_options=b'\202\323\344\223\002A"*/v3/{parent=projects/*}/uptimeCheckConfigs:\023uptime_check_config\332A\032parent,uptime_check_config', - ), - _descriptor.MethodDescriptor( - name="UpdateUptimeCheckConfig", - full_name="google.monitoring.v3.UptimeCheckService.UpdateUptimeCheckConfig", - index=3, - containing_service=None, - input_type=_UPDATEUPTIMECHECKCONFIGREQUEST, - output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2._UPTIMECHECKCONFIG, - serialized_options=b"\202\323\344\223\002U2>/v3/{uptime_check_config.name=projects/*/uptimeCheckConfigs/*}:\023uptime_check_config\332A\023uptime_check_config", - ), - _descriptor.MethodDescriptor( - name="DeleteUptimeCheckConfig", - full_name="google.monitoring.v3.UptimeCheckService.DeleteUptimeCheckConfig", - index=4, - containing_service=None, - input_type=_DELETEUPTIMECHECKCONFIGREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002,**/v3/{name=projects/*/uptimeCheckConfigs/*}\332A\004name", - ), - _descriptor.MethodDescriptor( - name="ListUptimeCheckIps", - full_name="google.monitoring.v3.UptimeCheckService.ListUptimeCheckIps", - index=5, - containing_service=None, - input_type=_LISTUPTIMECHECKIPSREQUEST, - output_type=_LISTUPTIMECHECKIPSRESPONSE, - serialized_options=b"\202\323\344\223\002\024\022\022/v3/uptimeCheckIps", - ), - ], -) -_sym_db.RegisterServiceDescriptor(_UPTIMECHECKSERVICE) - -DESCRIPTOR.services_by_name["UptimeCheckService"] = _UPTIMECHECKSERVICE - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/monitoring_v3/proto/uptime_service_pb2_grpc.py b/google/cloud/monitoring_v3/proto/uptime_service_pb2_grpc.py deleted 
file mode 100644 index 9835d01f..00000000 --- a/google/cloud/monitoring_v3/proto/uptime_service_pb2_grpc.py +++ /dev/null @@ -1,158 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - -from google.cloud.monitoring_v3.proto import ( - uptime_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2, -) -from google.cloud.monitoring_v3.proto import ( - uptime_service_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class UptimeCheckServiceStub(object): - """The UptimeCheckService API is used to manage (list, create, delete, edit) - Uptime check configurations in the Stackdriver Monitoring product. An Uptime - check is a piece of configuration that determines which resources and - services to monitor for availability. These configurations can also be - configured interactively by navigating to the [Cloud Console] - (http://console.cloud.google.com), selecting the appropriate project, - clicking on "Monitoring" on the left-hand side to navigate to Stackdriver, - and then clicking on "Uptime". - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.ListUptimeCheckConfigs = channel.unary_unary( - "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckConfigsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckConfigsResponse.FromString, - ) - self.GetUptimeCheckConfig = channel.unary_unary( - "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.GetUptimeCheckConfigRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.UptimeCheckConfig.FromString, - ) - self.CreateUptimeCheckConfig = channel.unary_unary( - "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.CreateUptimeCheckConfigRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.UptimeCheckConfig.FromString, - ) - self.UpdateUptimeCheckConfig = channel.unary_unary( - "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.UpdateUptimeCheckConfigRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.UptimeCheckConfig.FromString, - ) - self.DeleteUptimeCheckConfig = channel.unary_unary( - "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.DeleteUptimeCheckConfigRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.ListUptimeCheckIps = channel.unary_unary( - 
"/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", - request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckIpsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckIpsResponse.FromString, - ) - - -class UptimeCheckServiceServicer(object): - """The UptimeCheckService API is used to manage (list, create, delete, edit) - Uptime check configurations in the Stackdriver Monitoring product. An Uptime - check is a piece of configuration that determines which resources and - services to monitor for availability. These configurations can also be - configured interactively by navigating to the [Cloud Console] - (http://console.cloud.google.com), selecting the appropriate project, - clicking on "Monitoring" on the left-hand side to navigate to Stackdriver, - and then clicking on "Uptime". - """ - - def ListUptimeCheckConfigs(self, request, context): - """Lists the existing valid Uptime check configurations for the project - (leaving out any invalid configurations). - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetUptimeCheckConfig(self, request, context): - """Gets a single Uptime check configuration. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateUptimeCheckConfig(self, request, context): - """Creates a new Uptime check configuration. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateUptimeCheckConfig(self, request, context): - """Updates an Uptime check configuration. 
You can either replace the entire - configuration with a new one or replace only certain fields in the current - configuration by specifying the fields to be updated via `updateMask`. - Returns the updated configuration. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteUptimeCheckConfig(self, request, context): - """Deletes an Uptime check configuration. Note that this method will fail - if the Uptime check configuration is referenced by an alert policy or - other dependent configs that would be rendered invalid by the deletion. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListUptimeCheckIps(self, request, context): - """Returns the list of IP addresses that checkers run from - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_UptimeCheckServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - "ListUptimeCheckConfigs": grpc.unary_unary_rpc_method_handler( - servicer.ListUptimeCheckConfigs, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckConfigsRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckConfigsResponse.SerializeToString, - ), - "GetUptimeCheckConfig": grpc.unary_unary_rpc_method_handler( - servicer.GetUptimeCheckConfig, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.GetUptimeCheckConfigRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.UptimeCheckConfig.SerializeToString, - ), - "CreateUptimeCheckConfig": grpc.unary_unary_rpc_method_handler( - 
servicer.CreateUptimeCheckConfig, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.CreateUptimeCheckConfigRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.UptimeCheckConfig.SerializeToString, - ), - "UpdateUptimeCheckConfig": grpc.unary_unary_rpc_method_handler( - servicer.UpdateUptimeCheckConfig, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.UpdateUptimeCheckConfigRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.UptimeCheckConfig.SerializeToString, - ), - "DeleteUptimeCheckConfig": grpc.unary_unary_rpc_method_handler( - servicer.DeleteUptimeCheckConfig, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.DeleteUptimeCheckConfigRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "ListUptimeCheckIps": grpc.unary_unary_rpc_method_handler( - servicer.ListUptimeCheckIps, - request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckIpsRequest.FromString, - response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckIpsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.monitoring.v3.UptimeCheckService", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/google/cloud/monitoring_v3/py.typed b/google/cloud/monitoring_v3/py.typed new file mode 100644 index 00000000..55d895b0 --- /dev/null +++ b/google/cloud/monitoring_v3/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-monitoring package uses inline types. 
diff --git a/google/cloud/monitoring_v3/query.py b/google/cloud/monitoring_v3/query.py index da7d74db..95f80848 100644 --- a/google/cloud/monitoring_v3/query.py +++ b/google/cloud/monitoring_v3/query.py @@ -24,9 +24,9 @@ import six +import google.cloud.monitoring_v3 as monitoring_v3 from google.cloud.monitoring_v3 import _dataframe from google.cloud.monitoring_v3 import types -from google.cloud.monitoring_v3.gapic import enums _UTCNOW = datetime.datetime.utcnow # To be replaced by tests. @@ -103,7 +103,7 @@ def __init__( raise ValueError("Non-zero duration required for time interval.") self._client = client - self._project_path = self._client.project_path(project) + self._project_path = f"projects/{project}" self._end_time = end_time self._start_time = start_time self._filter = _Filter(metric_type) @@ -334,20 +334,20 @@ def align(self, per_series_aligner, seconds=0, minutes=0, hours=0): Example:: - from google.cloud.monitoring import enums + from google.cloud import monitoring query = query.align( - enums.Aggregation.Aligner.ALIGN_MEAN, minutes=5) + monitoring.Aggregation.Aligner.ALIGN_MEAN, minutes=5) It is also possible to specify the aligner as a literal string:: query = query.align('ALIGN_MEAN', minutes=5) :type per_series_aligner: str or - :class:`~google.cloud.monitoring_v3.gapic.enums.Aggregation.Aligner` + :class:`~google.cloud.monitoring_v3.Aggregation.Aligner` :param per_series_aligner: The approach to be used to align individual time series. For example: :data:`Aligner.ALIGN_MEAN`. See - :class:`~google.cloud.monitoring_v3.gapic.enums.Aggregation.Aligner` + :class:`~google.cloud.monitoring_v3.Aggregation.Aligner` and the descriptions of the `supported aligners`_. 
:type seconds: int @@ -380,16 +380,16 @@ def reduce(self, cross_series_reducer, *group_by_fields): For example, you could request an aggregated time series for each combination of project and zone as follows:: - from google.cloud.monitoring import enums - query = query.reduce(enums.Aggregation.Reducer.REDUCE_MEAN, + from google.cloud import monitoring + query = query.reduce(monitoring.Aggregation.Reducer.REDUCE_MEAN, 'resource.project_id', 'resource.zone') :type cross_series_reducer: str or - :class:`~google.cloud.monitoring_v3.gapic.enums.Aggregation.Reducer` + :class:`~google.cloud.monitoring_v3.Aggregation.Reducer` :param cross_series_reducer: The approach to be used to combine time series. For example: :data:`Reducer.REDUCE_MEAN`. See - :class:`~google.cloud.monitoring_v3.gapic.enums.Aggregation.Reducer` + :class:`~google.cloud.monitoring_v3.Aggregation.Reducer` and the descriptions of the `supported reducers`_. :type group_by_fields: strs @@ -460,12 +460,13 @@ def _build_query_params(self, headers_only=False, page_size=None): from this request. Non-positive values are ignored. Defaults to a sensible value set by the API. 
""" - params = {"name": self._project_path, "filter_": self.filter} - - params["interval"] = types.TimeInterval() - params["interval"].end_time.FromDatetime(self._end_time) - if self._start_time: - params["interval"].start_time.FromDatetime(self._start_time) + params = { + "name": self._project_path, + "filter": self.filter, + "interval": types.TimeInterval( + start_time=self._start_time, end_time=self._end_time + ), + } if ( self._per_series_aligner @@ -480,10 +481,8 @@ def _build_query_params(self, headers_only=False, page_size=None): alignment_period={"seconds": self._alignment_period_seconds}, ) - if headers_only: - params["view"] = enums.ListTimeSeriesRequest.TimeSeriesView.HEADERS - else: - params["view"] = enums.ListTimeSeriesRequest.TimeSeriesView.FULL + tsv = monitoring_v3.ListTimeSeriesRequest.TimeSeriesView + params["view"] = tsv.HEADERS if headers_only else tsv.FULL if page_size is not None: params["page_size"] = page_size diff --git a/google/cloud/monitoring_v3/services/__init__.py b/google/cloud/monitoring_v3/services/__init__.py new file mode 100644 index 00000000..42ffdf2b --- /dev/null +++ b/google/cloud/monitoring_v3/services/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/google/cloud/monitoring_v3/services/alert_policy_service/__init__.py b/google/cloud/monitoring_v3/services/alert_policy_service/__init__.py new file mode 100644 index 00000000..b5aed67b --- /dev/null +++ b/google/cloud/monitoring_v3/services/alert_policy_service/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .client import AlertPolicyServiceClient +from .async_client import AlertPolicyServiceAsyncClient + +__all__ = ( + "AlertPolicyServiceClient", + "AlertPolicyServiceAsyncClient", +) diff --git a/google/cloud/monitoring_v3/services/alert_policy_service/async_client.py b/google/cloud/monitoring_v3/services/alert_policy_service/async_client.py new file mode 100644 index 00000000..d03d1031 --- /dev/null +++ b/google/cloud/monitoring_v3/services/alert_policy_service/async_client.py @@ -0,0 +1,642 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.monitoring_v3.services.alert_policy_service import pagers +from google.cloud.monitoring_v3.types import alert +from google.cloud.monitoring_v3.types import alert_service +from google.cloud.monitoring_v3.types import mutation_record +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import wrappers_pb2 as wrappers # type: ignore +from google.rpc import status_pb2 as status # type: ignore + +from .transports.base import AlertPolicyServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import AlertPolicyServiceGrpcAsyncIOTransport +from .client import AlertPolicyServiceClient + + +class AlertPolicyServiceAsyncClient: + """The AlertPolicyService API is used to manage (list, create, delete, + edit) alert policies in Stackdriver Monitoring. An alerting policy + is a description of the conditions under which some aspect of your + system is considered to be "unhealthy" and the ways to notify people + or services about this state. In addition to using this API, alert + policies can also be managed through `Stackdriver + Monitoring `__, which can + be reached by clicking the "Monitoring" tab in `Cloud + Console `__. 
+ """ + + _client: AlertPolicyServiceClient + + DEFAULT_ENDPOINT = AlertPolicyServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = AlertPolicyServiceClient.DEFAULT_MTLS_ENDPOINT + + alert_policy_path = staticmethod(AlertPolicyServiceClient.alert_policy_path) + parse_alert_policy_path = staticmethod( + AlertPolicyServiceClient.parse_alert_policy_path + ) + alert_policy_condition_path = staticmethod( + AlertPolicyServiceClient.alert_policy_condition_path + ) + parse_alert_policy_condition_path = staticmethod( + AlertPolicyServiceClient.parse_alert_policy_condition_path + ) + + common_project_path = staticmethod(AlertPolicyServiceClient.common_project_path) + parse_common_project_path = staticmethod( + AlertPolicyServiceClient.parse_common_project_path + ) + + common_organization_path = staticmethod( + AlertPolicyServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + AlertPolicyServiceClient.parse_common_organization_path + ) + + common_folder_path = staticmethod(AlertPolicyServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + AlertPolicyServiceClient.parse_common_folder_path + ) + + common_billing_account_path = staticmethod( + AlertPolicyServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + AlertPolicyServiceClient.parse_common_billing_account_path + ) + + common_location_path = staticmethod(AlertPolicyServiceClient.common_location_path) + parse_common_location_path = staticmethod( + AlertPolicyServiceClient.parse_common_location_path + ) + + from_service_account_file = AlertPolicyServiceClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(AlertPolicyServiceClient).get_transport_class, + type(AlertPolicyServiceClient), + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, AlertPolicyServiceTransport] = 
"grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the alert policy service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.AlertPolicyServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + + self._client = AlertPolicyServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def list_alert_policies( + self, + request: alert_service.ListAlertPoliciesRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAlertPoliciesAsyncPager: + r"""Lists the existing alerting policies for the project. + + Args: + request (:class:`~.alert_service.ListAlertPoliciesRequest`): + The request object. The protocol for the + `ListAlertPolicies` request. + name (:class:`str`): + Required. The project whose alert policies are to be + listed. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + + Note that this field names the parent container in which + the alerting policies to be listed are stored. To + retrieve a single alerting policy by name, use the + [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy] + operation, instead. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListAlertPoliciesAsyncPager: + The protocol for the ``ListAlertPolicies`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = alert_service.ListAlertPoliciesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_alert_policies, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListAlertPoliciesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_alert_policy( + self, + request: alert_service.GetAlertPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> alert.AlertPolicy: + r"""Gets a single alerting policy. + + Args: + request (:class:`~.alert_service.GetAlertPolicyRequest`): + The request object. The protocol for the + `GetAlertPolicy` request. + name (:class:`str`): + Required. The alerting policy to retrieve. The format + is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.alert.AlertPolicy: + A description of the conditions under which some aspect + of your system is considered to be "unhealthy" and the + ways to notify people or services about this state. For + an overview of alert policies, see `Introduction to + Alerting `__. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = alert_service.GetAlertPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_alert_policy, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def create_alert_policy( + self, + request: alert_service.CreateAlertPolicyRequest = None, + *, + name: str = None, + alert_policy: alert.AlertPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> alert.AlertPolicy: + r"""Creates a new alerting policy. + + Args: + request (:class:`~.alert_service.CreateAlertPolicyRequest`): + The request object. The protocol for the + `CreateAlertPolicy` request. + name (:class:`str`): + Required. The project in which to create the alerting + policy. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + + Note that this field names the parent container in which + the alerting policy will be written, not the name of the + created policy. The alerting policy that is returned + will have a name that contains a normalized + representation of this name as a prefix but adds a + suffix of the form ``/alertPolicies/[ALERT_POLICY_ID]``, + identifying the policy in the container. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + alert_policy (:class:`~.alert.AlertPolicy`): + Required. The requested alerting policy. You should omit + the ``name`` field in this policy. The name will be + returned in the new policy, including a new + ``[ALERT_POLICY_ID]`` value. + This corresponds to the ``alert_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.alert.AlertPolicy: + A description of the conditions under which some aspect + of your system is considered to be "unhealthy" and the + ways to notify people or services about this state. 
For + an overview of alert policies, see `Introduction to + Alerting `__. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name, alert_policy]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = alert_service.CreateAlertPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if alert_policy is not None: + request.alert_policy = alert_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_alert_policy, + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_alert_policy( + self, + request: alert_service.DeleteAlertPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an alerting policy. + + Args: + request (:class:`~.alert_service.DeleteAlertPolicyRequest`): + The request object. The protocol for the + `DeleteAlertPolicy` request. + name (:class:`str`): + Required. The alerting policy to delete. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + + For more information, see + [AlertPolicy][google.monitoring.v3.AlertPolicy]. 
+ This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = alert_service.DeleteAlertPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_alert_policy, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def update_alert_policy( + self, + request: alert_service.UpdateAlertPolicyRequest = None, + *, + update_mask: field_mask.FieldMask = None, + alert_policy: alert.AlertPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> alert.AlertPolicy: + r"""Updates an alerting policy. You can either replace the entire + policy with a new one or replace only certain fields in the + current alerting policy by specifying the fields to be updated + via ``updateMask``. Returns the updated alerting policy. + + Args: + request (:class:`~.alert_service.UpdateAlertPolicyRequest`): + The request object. The protocol for the + `UpdateAlertPolicy` request. + update_mask (:class:`~.field_mask.FieldMask`): + Optional. A list of alerting policy field names. If this + field is not empty, each listed field in the existing + alerting policy is set to the value of the corresponding + field in the supplied policy (``alert_policy``), or to + the field's default value if the field is not in the + supplied alerting policy. Fields not listed retain their + previous value. + + Examples of valid field masks include ``display_name``, + ``documentation``, ``documentation.content``, + ``documentation.mime_type``, ``user_labels``, + ``user_label.nameofkey``, ``enabled``, ``conditions``, + ``combiner``, etc. + + If this field is empty, then the supplied alerting + policy replaces the existing policy. It is the same as + deleting the existing policy and adding the supplied + policy, except for the following: + + - The new policy will have the same + ``[ALERT_POLICY_ID]`` as the former policy. This + gives you continuity with the former policy in your + notifications and incidents. 
+ - Conditions in the new policy will keep their former + ``[CONDITION_ID]`` if the supplied condition includes + the ``name`` field with that ``[CONDITION_ID]``. If + the supplied condition omits the ``name`` field, then + a new ``[CONDITION_ID]`` is created. + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + alert_policy (:class:`~.alert.AlertPolicy`): + Required. The updated alerting policy or the updated + values for the fields listed in ``update_mask``. If + ``update_mask`` is not empty, any fields in this policy + that are not in ``update_mask`` are ignored. + This corresponds to the ``alert_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.alert.AlertPolicy: + A description of the conditions under which some aspect + of your system is considered to be "unhealthy" and the + ways to notify people or services about this state. For + an overview of alert policies, see `Introduction to + Alerting `__. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([update_mask, alert_policy]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = alert_service.UpdateAlertPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if update_mask is not None: + request.update_mask = update_mask + if alert_policy is not None: + request.alert_policy = alert_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_alert_policy, + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("alert_policy.name", request.alert_policy.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("AlertPolicyServiceAsyncClient",) diff --git a/google/cloud/monitoring_v3/services/alert_policy_service/client.py b/google/cloud/monitoring_v3/services/alert_policy_service/client.py new file mode 100644 index 00000000..5bd324d7 --- /dev/null +++ b/google/cloud/monitoring_v3/services/alert_policy_service/client.py @@ -0,0 +1,829 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.monitoring_v3.services.alert_policy_service import pagers +from google.cloud.monitoring_v3.types import alert +from google.cloud.monitoring_v3.types import alert_service +from google.cloud.monitoring_v3.types import mutation_record +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import wrappers_pb2 as wrappers # type: ignore +from google.rpc import status_pb2 as status # type: ignore + +from .transports.base import AlertPolicyServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import AlertPolicyServiceGrpcTransport +from .transports.grpc_asyncio import AlertPolicyServiceGrpcAsyncIOTransport + + +class AlertPolicyServiceClientMeta(type): + """Metaclass for the AlertPolicyService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[AlertPolicyServiceTransport]] + _transport_registry["grpc"] = AlertPolicyServiceGrpcTransport + _transport_registry["grpc_asyncio"] = AlertPolicyServiceGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[AlertPolicyServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class AlertPolicyServiceClient(metaclass=AlertPolicyServiceClientMeta): + """The AlertPolicyService API is used to manage (list, create, delete, + edit) alert policies in Stackdriver Monitoring. An alerting policy + is a description of the conditions under which some aspect of your + system is considered to be "unhealthy" and the ways to notify people + or services about this state. In addition to using this API, alert + policies can also be managed through `Stackdriver + Monitoring `__, which can + be reached by clicking the "Monitoring" tab in `Cloud + Console `__. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "monitoring.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + {@api.name}: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @staticmethod + def alert_policy_path(project: str, alert_policy: str,) -> str: + """Return a fully-qualified alert_policy string.""" + return "projects/{project}/alertPolicies/{alert_policy}".format( + project=project, alert_policy=alert_policy, + ) + + @staticmethod + def parse_alert_policy_path(path: str) -> Dict[str, str]: + """Parse a alert_policy path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/alertPolicies/(?P.+?)$", path + ) + return m.groupdict() if m else {} + + @staticmethod + def alert_policy_condition_path( + project: str, alert_policy: str, condition: str, + ) -> str: + """Return a fully-qualified alert_policy_condition string.""" + return "projects/{project}/alertPolicies/{alert_policy}/conditions/{condition}".format( + project=project, alert_policy=alert_policy, condition=condition, + ) + + @staticmethod + def 
parse_alert_policy_condition_path(path: str) -> Dict[str, str]: + """Parse a alert_policy_condition path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/alertPolicies/(?P.+?)/conditions/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Return a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Return a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Return a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Return a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def 
common_location_path(project: str, location: str,) -> str: + """Return a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, AlertPolicyServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the alert policy service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.AlertPolicyServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (client_options_lib.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. 
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, AlertPolicyServiceTransport): + # transport is a AlertPolicyServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def list_alert_policies( + self, + request: alert_service.ListAlertPoliciesRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAlertPoliciesPager: + r"""Lists the existing alerting policies for the project. + + Args: + request (:class:`~.alert_service.ListAlertPoliciesRequest`): + The request object. 
The protocol for the + `ListAlertPolicies` request. + name (:class:`str`): + Required. The project whose alert policies are to be + listed. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + + Note that this field names the parent container in which + the alerting policies to be listed are stored. To + retrieve a single alerting policy by name, use the + [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy] + operation, instead. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListAlertPoliciesPager: + The protocol for the ``ListAlertPolicies`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a alert_service.ListAlertPoliciesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, alert_service.ListAlertPoliciesRequest): + request = alert_service.ListAlertPoliciesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_alert_policies] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListAlertPoliciesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def get_alert_policy( + self, + request: alert_service.GetAlertPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> alert.AlertPolicy: + r"""Gets a single alerting policy. + + Args: + request (:class:`~.alert_service.GetAlertPolicyRequest`): + The request object. The protocol for the + `GetAlertPolicy` request. + name (:class:`str`): + Required. The alerting policy to retrieve. The format + is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.alert.AlertPolicy: + A description of the conditions under which some aspect + of your system is considered to be "unhealthy" and the + ways to notify people or services about this state. 
For + an overview of alert policies, see `Introduction to + Alerting `__. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a alert_service.GetAlertPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, alert_service.GetAlertPolicyRequest): + request = alert_service.GetAlertPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_alert_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_alert_policy( + self, + request: alert_service.CreateAlertPolicyRequest = None, + *, + name: str = None, + alert_policy: alert.AlertPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> alert.AlertPolicy: + r"""Creates a new alerting policy. + + Args: + request (:class:`~.alert_service.CreateAlertPolicyRequest`): + The request object. The protocol for the + `CreateAlertPolicy` request. + name (:class:`str`): + Required. 
The project in which to create the alerting + policy. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + + Note that this field names the parent container in which + the alerting policy will be written, not the name of the + created policy. The alerting policy that is returned + will have a name that contains a normalized + representation of this name as a prefix but adds a + suffix of the form ``/alertPolicies/[ALERT_POLICY_ID]``, + identifying the policy in the container. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + alert_policy (:class:`~.alert.AlertPolicy`): + Required. The requested alerting policy. You should omit + the ``name`` field in this policy. The name will be + returned in the new policy, including a new + ``[ALERT_POLICY_ID]`` value. + This corresponds to the ``alert_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.alert.AlertPolicy: + A description of the conditions under which some aspect + of your system is considered to be "unhealthy" and the + ways to notify people or services about this state. For + an overview of alert policies, see `Introduction to + Alerting `__. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, alert_policy]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a alert_service.CreateAlertPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, alert_service.CreateAlertPolicyRequest): + request = alert_service.CreateAlertPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if alert_policy is not None: + request.alert_policy = alert_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_alert_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_alert_policy( + self, + request: alert_service.DeleteAlertPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an alerting policy. + + Args: + request (:class:`~.alert_service.DeleteAlertPolicyRequest`): + The request object. The protocol for the + `DeleteAlertPolicy` request. + name (:class:`str`): + Required. The alerting policy to delete. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + + For more information, see + [AlertPolicy][google.monitoring.v3.AlertPolicy]. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a alert_service.DeleteAlertPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, alert_service.DeleteAlertPolicyRequest): + request = alert_service.DeleteAlertPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_alert_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def update_alert_policy( + self, + request: alert_service.UpdateAlertPolicyRequest = None, + *, + update_mask: field_mask.FieldMask = None, + alert_policy: alert.AlertPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> alert.AlertPolicy: + r"""Updates an alerting policy. 
You can either replace the entire + policy with a new one or replace only certain fields in the + current alerting policy by specifying the fields to be updated + via ``updateMask``. Returns the updated alerting policy. + + Args: + request (:class:`~.alert_service.UpdateAlertPolicyRequest`): + The request object. The protocol for the + `UpdateAlertPolicy` request. + update_mask (:class:`~.field_mask.FieldMask`): + Optional. A list of alerting policy field names. If this + field is not empty, each listed field in the existing + alerting policy is set to the value of the corresponding + field in the supplied policy (``alert_policy``), or to + the field's default value if the field is not in the + supplied alerting policy. Fields not listed retain their + previous value. + + Examples of valid field masks include ``display_name``, + ``documentation``, ``documentation.content``, + ``documentation.mime_type``, ``user_labels``, + ``user_label.nameofkey``, ``enabled``, ``conditions``, + ``combiner``, etc. + + If this field is empty, then the supplied alerting + policy replaces the existing policy. It is the same as + deleting the existing policy and adding the supplied + policy, except for the following: + + - The new policy will have the same + ``[ALERT_POLICY_ID]`` as the former policy. This + gives you continuity with the former policy in your + notifications and incidents. + - Conditions in the new policy will keep their former + ``[CONDITION_ID]`` if the supplied condition includes + the ``name`` field with that ``[CONDITION_ID]``. If + the supplied condition omits the ``name`` field, then + a new ``[CONDITION_ID]`` is created. + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + alert_policy (:class:`~.alert.AlertPolicy`): + Required. The updated alerting policy or the updated + values for the fields listed in ``update_mask``. 
If + ``update_mask`` is not empty, any fields in this policy + that are not in ``update_mask`` are ignored. + This corresponds to the ``alert_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.alert.AlertPolicy: + A description of the conditions under which some aspect + of your system is considered to be "unhealthy" and the + ways to notify people or services about this state. For + an overview of alert policies, see `Introduction to + Alerting `__. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([update_mask, alert_policy]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a alert_service.UpdateAlertPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, alert_service.UpdateAlertPolicyRequest): + request = alert_service.UpdateAlertPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if update_mask is not None: + request.update_mask = update_mask + if alert_policy is not None: + request.alert_policy = alert_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.update_alert_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("alert_policy.name", request.alert_policy.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("AlertPolicyServiceClient",) diff --git a/google/cloud/monitoring_v3/services/alert_policy_service/pagers.py b/google/cloud/monitoring_v3/services/alert_policy_service/pagers.py new file mode 100644 index 00000000..4c486cb7 --- /dev/null +++ b/google/cloud/monitoring_v3/services/alert_policy_service/pagers.py @@ -0,0 +1,149 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.monitoring_v3.types import alert +from google.cloud.monitoring_v3.types import alert_service + + +class ListAlertPoliciesPager: + """A pager for iterating through ``list_alert_policies`` requests. 
+ + This class thinly wraps an initial + :class:`~.alert_service.ListAlertPoliciesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``alert_policies`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListAlertPolicies`` requests and continue to iterate + through the ``alert_policies`` field on the + corresponding responses. + + All the usual :class:`~.alert_service.ListAlertPoliciesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., alert_service.ListAlertPoliciesResponse], + request: alert_service.ListAlertPoliciesRequest, + response: alert_service.ListAlertPoliciesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.alert_service.ListAlertPoliciesRequest`): + The initial request object. + response (:class:`~.alert_service.ListAlertPoliciesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = alert_service.ListAlertPoliciesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[alert_service.ListAlertPoliciesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[alert.AlertPolicy]: + for page in self.pages: + yield from page.alert_policies + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListAlertPoliciesAsyncPager: + """A pager for iterating through ``list_alert_policies`` requests. + + This class thinly wraps an initial + :class:`~.alert_service.ListAlertPoliciesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``alert_policies`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListAlertPolicies`` requests and continue to iterate + through the ``alert_policies`` field on the + corresponding responses. + + All the usual :class:`~.alert_service.ListAlertPoliciesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[alert_service.ListAlertPoliciesResponse]], + request: alert_service.ListAlertPoliciesRequest, + response: alert_service.ListAlertPoliciesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.alert_service.ListAlertPoliciesRequest`): + The initial request object. 
+ response (:class:`~.alert_service.ListAlertPoliciesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = alert_service.ListAlertPoliciesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[alert_service.ListAlertPoliciesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[alert.AlertPolicy]: + async def async_generator(): + async for page in self.pages: + for response in page.alert_policies: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/monitoring_v3/services/alert_policy_service/transports/__init__.py b/google/cloud/monitoring_v3/services/alert_policy_service/transports/__init__.py new file mode 100644 index 00000000..8044932e --- /dev/null +++ b/google/cloud/monitoring_v3/services/alert_policy_service/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import AlertPolicyServiceTransport +from .grpc import AlertPolicyServiceGrpcTransport +from .grpc_asyncio import AlertPolicyServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[AlertPolicyServiceTransport]] +_transport_registry["grpc"] = AlertPolicyServiceGrpcTransport +_transport_registry["grpc_asyncio"] = AlertPolicyServiceGrpcAsyncIOTransport + + +__all__ = ( + "AlertPolicyServiceTransport", + "AlertPolicyServiceGrpcTransport", + "AlertPolicyServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/monitoring_v3/services/alert_policy_service/transports/base.py b/google/cloud/monitoring_v3/services/alert_policy_service/transports/base.py new file mode 100644 index 00000000..d79536e7 --- /dev/null +++ b/google/cloud/monitoring_v3/services/alert_policy_service/transports/base.py @@ -0,0 +1,211 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.monitoring_v3.types import alert +from google.cloud.monitoring_v3.types import alert_service +from google.protobuf import empty_pb2 as empty # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class AlertPolicyServiceTransport(abc.ABC): + """Abstract transport class for AlertPolicyService.""" + + AUTH_SCOPES = ( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ) + + def __init__( + self, + *, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.list_alert_policies: gapic_v1.method.wrap_method( + self.list_alert_policies, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.get_alert_policy: gapic_v1.method.wrap_method( + self.get_alert_policy, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.create_alert_policy: gapic_v1.method.wrap_method( + self.create_alert_policy, default_timeout=30.0, client_info=client_info, + ), + self.delete_alert_policy: gapic_v1.method.wrap_method( + self.delete_alert_policy, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.update_alert_policy: gapic_v1.method.wrap_method( + self.update_alert_policy, default_timeout=30.0, client_info=client_info, + ), + } + + @property + def list_alert_policies( + self, + ) -> typing.Callable[ + [alert_service.ListAlertPoliciesRequest], + typing.Union[ + alert_service.ListAlertPoliciesResponse, + typing.Awaitable[alert_service.ListAlertPoliciesResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_alert_policy( + self, + ) -> typing.Callable[ + [alert_service.GetAlertPolicyRequest], + typing.Union[alert.AlertPolicy, typing.Awaitable[alert.AlertPolicy]], + ]: + raise NotImplementedError() + + @property + def create_alert_policy( + self, + ) -> typing.Callable[ + [alert_service.CreateAlertPolicyRequest], + typing.Union[alert.AlertPolicy, typing.Awaitable[alert.AlertPolicy]], + ]: + raise 
NotImplementedError() + + @property + def delete_alert_policy( + self, + ) -> typing.Callable[ + [alert_service.DeleteAlertPolicyRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def update_alert_policy( + self, + ) -> typing.Callable[ + [alert_service.UpdateAlertPolicyRequest], + typing.Union[alert.AlertPolicy, typing.Awaitable[alert.AlertPolicy]], + ]: + raise NotImplementedError() + + +__all__ = ("AlertPolicyServiceTransport",) diff --git a/google/cloud/monitoring_v3/services/alert_policy_service/transports/grpc.py b/google/cloud/monitoring_v3/services/alert_policy_service/transports/grpc.py new file mode 100644 index 00000000..069ab218 --- /dev/null +++ b/google/cloud/monitoring_v3/services/alert_policy_service/transports/grpc.py @@ -0,0 +1,379 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple

from google.api_core import grpc_helpers  # type: ignore
from google.api_core import gapic_v1  # type: ignore
from google import auth  # type: ignore
from google.auth import credentials  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore

import grpc  # type: ignore

from google.cloud.monitoring_v3.types import alert
from google.cloud.monitoring_v3.types import alert_service
from google.protobuf import empty_pb2 as empty  # type: ignore

from .base import AlertPolicyServiceTransport, DEFAULT_CLIENT_INFO


class AlertPolicyServiceGrpcTransport(AlertPolicyServiceTransport):
    """gRPC backend transport for AlertPolicyService.

    The AlertPolicyService API is used to manage (list, create, delete,
    edit) alert policies in Stackdriver Monitoring. An alerting policy
    is a description of the conditions under which some aspect of your
    system is considered to be "unhealthy" and the ways to notify people
    or services about this state. In addition to using this API, alert
    policies can also be managed through `Stackdriver Monitoring
    <https://app.google.stackdriver.com/>`__, which can be reached by
    clicking the "Monitoring" tab in `Cloud Console
    <https://console.cloud.google.com/>`__.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    # Cache of gRPC stub callables, keyed by RPC method name. Populated
    # lazily by the method properties below.
    _stubs: Dict[str, Callable]

    def __init__(
        self,
        *,
        host: str = "monitoring.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        if channel:
            # Sanity check: Ensure that channel and credentials are not both
            # provided. ``False`` (rather than ``None``) tells the base
            # transport not to attempt to ascertain default credentials.
            credentials = False

            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
        elif api_mtls_endpoint:
            warnings.warn(
                "api_mtls_endpoint and client_cert_source are deprecated",
                DeprecationWarning,
            )

            host = (
                api_mtls_endpoint
                if ":" in api_mtls_endpoint
                else api_mtls_endpoint + ":443"
            )

            if credentials is None:
                credentials, _ = auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )

            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                ssl_credentials = SslCredentials().ssl_credentials

            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
            )
        else:
            host = host if ":" in host else host + ":443"

            if credentials is None:
                credentials, _ = auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )

            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_channel_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
            )

        self._stubs = {}  # type: Dict[str, Callable]

        # Run the base constructor.
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes or self.AUTH_SCOPES,
            quota_project_id=quota_project_id,
            client_info=client_info,
        )

    @classmethod
    def create_channel(
        cls,
        host: str = "monitoring.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.

        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        scopes = scopes or cls.AUTH_SCOPES
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            **kwargs,
        )

    @property
    def grpc_channel(self) -> grpc.Channel:
        """Create the channel designed to connect to this service.

        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel

    @property
    def list_alert_policies(
        self,
    ) -> Callable[
        [alert_service.ListAlertPoliciesRequest],
        alert_service.ListAlertPoliciesResponse,
    ]:
        r"""Return a callable for the list alert policies method over gRPC.

        Lists the existing alerting policies for the project.

        Returns:
            Callable[[~.ListAlertPoliciesRequest],
                    ~.ListAlertPoliciesResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_alert_policies" not in self._stubs:
            self._stubs["list_alert_policies"] = self.grpc_channel.unary_unary(
                "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies",
                request_serializer=alert_service.ListAlertPoliciesRequest.serialize,
                response_deserializer=alert_service.ListAlertPoliciesResponse.deserialize,
            )
        return self._stubs["list_alert_policies"]

    @property
    def get_alert_policy(
        self,
    ) -> Callable[[alert_service.GetAlertPolicyRequest], alert.AlertPolicy]:
        r"""Return a callable for the get alert policy method over gRPC.

        Gets a single alerting policy.

        Returns:
            Callable[[~.GetAlertPolicyRequest],
                    ~.AlertPolicy]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_alert_policy" not in self._stubs:
            self._stubs["get_alert_policy"] = self.grpc_channel.unary_unary(
                "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy",
                request_serializer=alert_service.GetAlertPolicyRequest.serialize,
                response_deserializer=alert.AlertPolicy.deserialize,
            )
        return self._stubs["get_alert_policy"]

    @property
    def create_alert_policy(
        self,
    ) -> Callable[[alert_service.CreateAlertPolicyRequest], alert.AlertPolicy]:
        r"""Return a callable for the create alert policy method over gRPC.

        Creates a new alerting policy.

        Returns:
            Callable[[~.CreateAlertPolicyRequest],
                    ~.AlertPolicy]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "create_alert_policy" not in self._stubs:
            self._stubs["create_alert_policy"] = self.grpc_channel.unary_unary(
                "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy",
                request_serializer=alert_service.CreateAlertPolicyRequest.serialize,
                response_deserializer=alert.AlertPolicy.deserialize,
            )
        return self._stubs["create_alert_policy"]

    @property
    def delete_alert_policy(
        self,
    ) -> Callable[[alert_service.DeleteAlertPolicyRequest], empty.Empty]:
        r"""Return a callable for the delete alert policy method over gRPC.

        Deletes an alerting policy.

        Returns:
            Callable[[~.DeleteAlertPolicyRequest],
                    ~.Empty]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "delete_alert_policy" not in self._stubs:
            self._stubs["delete_alert_policy"] = self.grpc_channel.unary_unary(
                "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy",
                request_serializer=alert_service.DeleteAlertPolicyRequest.serialize,
                response_deserializer=empty.Empty.FromString,
            )
        return self._stubs["delete_alert_policy"]

    @property
    def update_alert_policy(
        self,
    ) -> Callable[[alert_service.UpdateAlertPolicyRequest], alert.AlertPolicy]:
        r"""Return a callable for the update alert policy method over gRPC.

        Updates an alerting policy. You can either replace the entire
        policy with a new one or replace only certain fields in the
        current alerting policy by specifying the fields to be updated
        via ``updateMask``. Returns the updated alerting policy.

        Returns:
            Callable[[~.UpdateAlertPolicyRequest],
                    ~.AlertPolicy]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "update_alert_policy" not in self._stubs:
            self._stubs["update_alert_policy"] = self.grpc_channel.unary_unary(
                "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy",
                request_serializer=alert_service.UpdateAlertPolicyRequest.serialize,
                response_deserializer=alert.AlertPolicy.deserialize,
            )
        return self._stubs["update_alert_policy"]


__all__ = ("AlertPolicyServiceGrpcTransport",)
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple

from google.api_core import gapic_v1  # type: ignore
from google.api_core import grpc_helpers_async  # type: ignore
from google import auth  # type: ignore
from google.auth import credentials  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore

import grpc  # type: ignore
from grpc.experimental import aio  # type: ignore

from google.cloud.monitoring_v3.types import alert
from google.cloud.monitoring_v3.types import alert_service
from google.protobuf import empty_pb2 as empty  # type: ignore

from .base import AlertPolicyServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import AlertPolicyServiceGrpcTransport


class AlertPolicyServiceGrpcAsyncIOTransport(AlertPolicyServiceTransport):
    """gRPC AsyncIO backend transport for AlertPolicyService.

    The AlertPolicyService API is used to manage (list, create, delete,
    edit) alert policies in Stackdriver Monitoring. An alerting policy
    is a description of the conditions under which some aspect of your
    system is considered to be "unhealthy" and the ways to notify people
    or services about this state. In addition to using this API, alert
    policies can also be managed through `Stackdriver Monitoring
    <https://app.google.stackdriver.com/>`__, which can be reached by
    clicking the "Monitoring" tab in `Cloud Console
    <https://console.cloud.google.com/>`__.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    # The asyncio channel through which RPCs are made.
    _grpc_channel: aio.Channel
    # Cache of gRPC stub callables, keyed by RPC method name. Replaced with
    # a fresh per-instance dict in ``__init__``.
    _stubs: Dict[str, Callable] = {}

    @classmethod
    def create_channel(
        cls,
        host: str = "monitoring.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.

        Returns:
            aio.Channel: A gRPC AsyncIO channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        scopes = scopes or cls.AUTH_SCOPES
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            **kwargs,
        )

    def __init__(
        self,
        *,
        host: str = "monitoring.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: aio.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        quota_project_id=None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        if channel:
            # Sanity check: Ensure that channel and credentials are not both
            # provided. ``False`` (rather than ``None``) tells the base
            # transport not to attempt to ascertain default credentials.
            credentials = False

            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
        elif api_mtls_endpoint:
            warnings.warn(
                "api_mtls_endpoint and client_cert_source are deprecated",
                DeprecationWarning,
            )

            host = (
                api_mtls_endpoint
                if ":" in api_mtls_endpoint
                else api_mtls_endpoint + ":443"
            )

            if credentials is None:
                credentials, _ = auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )

            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                ssl_credentials = SslCredentials().ssl_credentials

            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
            )
        else:
            host = host if ":" in host else host + ":443"

            if credentials is None:
                credentials, _ = auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )

            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_channel_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
            )

        # Run the base constructor.
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes or self.AUTH_SCOPES,
            quota_project_id=quota_project_id,
            client_info=client_info,
        )

        self._stubs = {}

    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.

        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel

    @property
    def list_alert_policies(
        self,
    ) -> Callable[
        [alert_service.ListAlertPoliciesRequest],
        Awaitable[alert_service.ListAlertPoliciesResponse],
    ]:
        r"""Return a callable for the list alert policies method over gRPC.

        Lists the existing alerting policies for the project.

        Returns:
            Callable[[~.ListAlertPoliciesRequest],
                    Awaitable[~.ListAlertPoliciesResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_alert_policies" not in self._stubs:
            self._stubs["list_alert_policies"] = self.grpc_channel.unary_unary(
                "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies",
                request_serializer=alert_service.ListAlertPoliciesRequest.serialize,
                response_deserializer=alert_service.ListAlertPoliciesResponse.deserialize,
            )
        return self._stubs["list_alert_policies"]

    @property
    def get_alert_policy(
        self,
    ) -> Callable[[alert_service.GetAlertPolicyRequest], Awaitable[alert.AlertPolicy]]:
        r"""Return a callable for the get alert policy method over gRPC.

        Gets a single alerting policy.

        Returns:
            Callable[[~.GetAlertPolicyRequest],
                    Awaitable[~.AlertPolicy]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_alert_policy" not in self._stubs:
            self._stubs["get_alert_policy"] = self.grpc_channel.unary_unary(
                "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy",
                request_serializer=alert_service.GetAlertPolicyRequest.serialize,
                response_deserializer=alert.AlertPolicy.deserialize,
            )
        return self._stubs["get_alert_policy"]

    @property
    def create_alert_policy(
        self,
    ) -> Callable[
        [alert_service.CreateAlertPolicyRequest], Awaitable[alert.AlertPolicy]
    ]:
        r"""Return a callable for the create alert policy method over gRPC.

        Creates a new alerting policy.

        Returns:
            Callable[[~.CreateAlertPolicyRequest],
                    Awaitable[~.AlertPolicy]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "create_alert_policy" not in self._stubs:
            self._stubs["create_alert_policy"] = self.grpc_channel.unary_unary(
                "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy",
                request_serializer=alert_service.CreateAlertPolicyRequest.serialize,
                response_deserializer=alert.AlertPolicy.deserialize,
            )
        return self._stubs["create_alert_policy"]

    @property
    def delete_alert_policy(
        self,
    ) -> Callable[[alert_service.DeleteAlertPolicyRequest], Awaitable[empty.Empty]]:
        r"""Return a callable for the delete alert policy method over gRPC.

        Deletes an alerting policy.

        Returns:
            Callable[[~.DeleteAlertPolicyRequest],
                    Awaitable[~.Empty]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "delete_alert_policy" not in self._stubs:
            self._stubs["delete_alert_policy"] = self.grpc_channel.unary_unary(
                "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy",
                request_serializer=alert_service.DeleteAlertPolicyRequest.serialize,
                response_deserializer=empty.Empty.FromString,
            )
        return self._stubs["delete_alert_policy"]

    @property
    def update_alert_policy(
        self,
    ) -> Callable[
        [alert_service.UpdateAlertPolicyRequest], Awaitable[alert.AlertPolicy]
    ]:
        r"""Return a callable for the update alert policy method over gRPC.

        Updates an alerting policy. You can either replace the entire
        policy with a new one or replace only certain fields in the
        current alerting policy by specifying the fields to be updated
        via ``updateMask``. Returns the updated alerting policy.

        Returns:
            Callable[[~.UpdateAlertPolicyRequest],
                    Awaitable[~.AlertPolicy]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "update_alert_policy" not in self._stubs:
            self._stubs["update_alert_policy"] = self.grpc_channel.unary_unary(
                "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy",
                request_serializer=alert_service.UpdateAlertPolicyRequest.serialize,
                response_deserializer=alert.AlertPolicy.deserialize,
            )
        return self._stubs["update_alert_policy"]


__all__ = ("AlertPolicyServiceGrpcAsyncIOTransport",)
"""Public entry points for the GroupService clients.

Re-exports the synchronous and asyncio client classes so callers can
import them directly from ``google.cloud.monitoring_v3.services.group_service``.
"""

from .client import GroupServiceClient
from .async_client import GroupServiceAsyncClient

# Explicitly declare the public API of this package.
__all__ = (
    "GroupServiceClient",
    "GroupServiceAsyncClient",
)
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.monitoring_v3.services.group_service import pagers +from google.cloud.monitoring_v3.types import group +from google.cloud.monitoring_v3.types import group as gm_group +from google.cloud.monitoring_v3.types import group_service + +from .transports.base import GroupServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import GroupServiceGrpcAsyncIOTransport +from .client import GroupServiceClient + + +class GroupServiceAsyncClient: + """The Group API lets you inspect and manage your + `groups <#google.monitoring.v3.Group>`__. + + A group is a named filter that is used to identify a collection of + monitored resources. Groups are typically used to mirror the + physical and/or logical topology of the environment. Because group + membership is computed dynamically, monitored resources that are + started in the future are automatically placed in matching groups. + By using a group to name monitored resources in, for example, an + alert policy, the target of that alert policy is updated + automatically as monitored resources are added and removed from the + infrastructure. 
+ """ + + _client: GroupServiceClient + + DEFAULT_ENDPOINT = GroupServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = GroupServiceClient.DEFAULT_MTLS_ENDPOINT + + group_path = staticmethod(GroupServiceClient.group_path) + parse_group_path = staticmethod(GroupServiceClient.parse_group_path) + + common_project_path = staticmethod(GroupServiceClient.common_project_path) + parse_common_project_path = staticmethod( + GroupServiceClient.parse_common_project_path + ) + + common_organization_path = staticmethod(GroupServiceClient.common_organization_path) + parse_common_organization_path = staticmethod( + GroupServiceClient.parse_common_organization_path + ) + + common_folder_path = staticmethod(GroupServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(GroupServiceClient.parse_common_folder_path) + + common_billing_account_path = staticmethod( + GroupServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + GroupServiceClient.parse_common_billing_account_path + ) + + common_location_path = staticmethod(GroupServiceClient.common_location_path) + parse_common_location_path = staticmethod( + GroupServiceClient.parse_common_location_path + ) + + from_service_account_file = GroupServiceClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(GroupServiceClient).get_transport_class, type(GroupServiceClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, GroupServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the group service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.GroupServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = GroupServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def list_groups( + self, + request: group_service.ListGroupsRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListGroupsAsyncPager: + r"""Lists the existing groups. + + Args: + request (:class:`~.group_service.ListGroupsRequest`): + The request object. The `ListGroup` request. 
+ name (:class:`str`): + Required. The project whose groups are to be listed. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListGroupsAsyncPager: + The ``ListGroups`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = group_service.ListGroupsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_groups, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListGroupsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_group( + self, + request: group_service.GetGroupRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> group.Group: + r"""Gets a single group. + + Args: + request (:class:`~.group_service.GetGroupRequest`): + The request object. The `GetGroup` request. + name (:class:`str`): + Required. The group to retrieve. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.group.Group: + The description of a dynamic collection of monitored + resources. Each group has a filter that is matched + against monitored resources and their associated + metadata. If a group's filter matches an available + monitored resource, then that resource is a member of + that group. Groups can contain any number of monitored + resources, and each monitored resource can be a member + of any number of groups. + + Groups can be nested in parent-child hierarchies. The + ``parentName`` field identifies an optional parent for + each group. If a group has a parent, then the only + monitored resources available to be matched by the + group's filter are the resources contained in the parent + group. 
In other words, a group contains the monitored + resources that match its filter and the filters of all + the group's ancestors. A group without a parent can + contain any monitored resource. + + For example, consider an infrastructure running a set of + instances with two user-defined tags: ``"environment"`` + and ``"role"``. A parent group has a filter, + ``environment="production"``. A child of that parent + group has a filter, ``role="transcoder"``. The parent + group contains all instances in the production + environment, regardless of their roles. The child group + contains instances that have the transcoder role *and* + are in the production environment. + + The monitored resources contained in a group can change + at any moment, depending on what resources exist and + what filters are associated with the group and its + ancestors. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = group_service.GetGroupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_group, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def create_group( + self, + request: group_service.CreateGroupRequest = None, + *, + name: str = None, + group: gm_group.Group = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gm_group.Group: + r"""Creates a new group. + + Args: + request (:class:`~.group_service.CreateGroupRequest`): + The request object. The `CreateGroup` request. + name (:class:`str`): + Required. The project in which to create the group. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + group (:class:`~.gm_group.Group`): + Required. A group definition. It is an error to define + the ``name`` field because the system assigns the name. + This corresponds to the ``group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gm_group.Group: + The description of a dynamic collection of monitored + resources. Each group has a filter that is matched + against monitored resources and their associated + metadata. If a group's filter matches an available + monitored resource, then that resource is a member of + that group. Groups can contain any number of monitored + resources, and each monitored resource can be a member + of any number of groups. + + Groups can be nested in parent-child hierarchies. 
The + ``parentName`` field identifies an optional parent for + each group. If a group has a parent, then the only + monitored resources available to be matched by the + group's filter are the resources contained in the parent + group. In other words, a group contains the monitored + resources that match its filter and the filters of all + the group's ancestors. A group without a parent can + contain any monitored resource. + + For example, consider an infrastructure running a set of + instances with two user-defined tags: ``"environment"`` + and ``"role"``. A parent group has a filter, + ``environment="production"``. A child of that parent + group has a filter, ``role="transcoder"``. The parent + group contains all instances in the production + environment, regardless of their roles. The child group + contains instances that have the transcoder role *and* + are in the production environment. + + The monitored resources contained in a group can change + at any moment, depending on what resources exist and + what filters are associated with the group and its + ancestors. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name, group]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = group_service.CreateGroupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if group is not None: + request.group = group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_group, + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_group( + self, + request: group_service.UpdateGroupRequest = None, + *, + group: gm_group.Group = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gm_group.Group: + r"""Updates an existing group. You can change any group attributes + except ``name``. + + Args: + request (:class:`~.group_service.UpdateGroupRequest`): + The request object. The `UpdateGroup` request. + group (:class:`~.gm_group.Group`): + Required. The new definition of the group. All fields of + the existing group, excepting ``name``, are replaced + with the corresponding fields of this group. + This corresponds to the ``group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gm_group.Group: + The description of a dynamic collection of monitored + resources. Each group has a filter that is matched + against monitored resources and their associated + metadata. If a group's filter matches an available + monitored resource, then that resource is a member of + that group. Groups can contain any number of monitored + resources, and each monitored resource can be a member + of any number of groups. 
+ + Groups can be nested in parent-child hierarchies. The + ``parentName`` field identifies an optional parent for + each group. If a group has a parent, then the only + monitored resources available to be matched by the + group's filter are the resources contained in the parent + group. In other words, a group contains the monitored + resources that match its filter and the filters of all + the group's ancestors. A group without a parent can + contain any monitored resource. + + For example, consider an infrastructure running a set of + instances with two user-defined tags: ``"environment"`` + and ``"role"``. A parent group has a filter, + ``environment="production"``. A child of that parent + group has a filter, ``role="transcoder"``. The parent + group contains all instances in the production + environment, regardless of their roles. The child group + contains instances that have the transcoder role *and* + are in the production environment. + + The monitored resources contained in a group can change + at any moment, depending on what resources exist and + what filters are associated with the group and its + ancestors. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([group]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = group_service.UpdateGroupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if group is not None: + request.group = group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_group, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("group.name", request.group.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_group( + self, + request: group_service.DeleteGroupRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an existing group. + + Args: + request (:class:`~.group_service.DeleteGroupRequest`): + The request object. The `DeleteGroup` request. The + default behavior is to be able to delete a single group + without any descendants. + name (:class:`str`): + Required. The group to delete. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = group_service.DeleteGroupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_group, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def list_group_members( + self, + request: group_service.ListGroupMembersRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListGroupMembersAsyncPager: + r"""Lists the monitored resources that are members of a + group. + + Args: + request (:class:`~.group_service.ListGroupMembersRequest`): + The request object. The `ListGroupMembers` request. + name (:class:`str`): + Required. The group whose members are listed. The format + is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListGroupMembersAsyncPager: + The ``ListGroupMembers`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = group_service.ListGroupMembersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_group_members, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListGroupMembersAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("GroupServiceAsyncClient",) diff --git a/google/cloud/monitoring_v3/services/group_service/client.py b/google/cloud/monitoring_v3/services/group_service/client.py new file mode 100644 index 00000000..461fc805 --- /dev/null +++ b/google/cloud/monitoring_v3/services/group_service/client.py @@ -0,0 +1,912 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.monitoring_v3.services.group_service import pagers +from google.cloud.monitoring_v3.types import group +from google.cloud.monitoring_v3.types import group as gm_group +from google.cloud.monitoring_v3.types import group_service + +from .transports.base import GroupServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import GroupServiceGrpcTransport +from .transports.grpc_asyncio import GroupServiceGrpcAsyncIOTransport + + +class GroupServiceClientMeta(type): + """Metaclass for the GroupService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = OrderedDict() # type: Dict[str, Type[GroupServiceTransport]] + _transport_registry["grpc"] = GroupServiceGrpcTransport + _transport_registry["grpc_asyncio"] = GroupServiceGrpcAsyncIOTransport + + def get_transport_class(cls, label: str = None,) -> Type[GroupServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. 
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class GroupServiceClient(metaclass=GroupServiceClientMeta):
+    """The Group API lets you inspect and manage your
+    `groups <#google.monitoring.v3.Group>`__.
+
+    A group is a named filter that is used to identify a collection of
+    monitored resources. Groups are typically used to mirror the
+    physical and/or logical topology of the environment. Because group
+    membership is computed dynamically, monitored resources that are
+    started in the future are automatically placed in matching groups.
+    By using a group to name monitored resources in, for example, an
+    alert policy, the target of that alert policy is updated
+    automatically as monitored resources are added and removed from the
+    infrastructure.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "monitoring.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            {@api.name}: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @staticmethod
+    def group_path(project: str, group: str,) -> str:
+        """Return a fully-qualified group string."""
+        return "projects/{project}/groups/{group}".format(project=project, group=group,)
+
+    @staticmethod
+    def parse_group_path(path: str) -> Dict[str, str]:
+        """Parse a group path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/groups/(?P<group>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str,) -> str:
+        """Return a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str,) -> str:
+        """Return a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder,)
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str,) -> str:
+        """Return a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization,)
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str,) -> str:
+        """Return a fully-qualified project string."""
+        return "projects/{project}".format(project=project,)
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str,) -> str:
+        """Return a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project, location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(
+        self,
+        *,
+        credentials: Optional[credentials.Credentials] = None,
+        transport: Union[str, GroupServiceTransport, None] = None,
+        client_options: Optional[client_options_lib.ClientOptions] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the group service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.GroupServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (client_options_lib.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+ """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, GroupServiceTransport): + # transport is a GroupServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def list_groups( + self, + request: group_service.ListGroupsRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListGroupsPager: + r"""Lists the existing groups. + + Args: + request (:class:`~.group_service.ListGroupsRequest`): + The request object. The `ListGroup` request. + name (:class:`str`): + Required. The project whose groups are to be listed. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListGroupsPager: + The ``ListGroups`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a group_service.ListGroupsRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, group_service.ListGroupsRequest): + request = group_service.ListGroupsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_groups] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListGroupsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def get_group( + self, + request: group_service.GetGroupRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> group.Group: + r"""Gets a single group. + + Args: + request (:class:`~.group_service.GetGroupRequest`): + The request object. The `GetGroup` request. + name (:class:`str`): + Required. The group to retrieve. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.group.Group: + The description of a dynamic collection of monitored + resources. Each group has a filter that is matched + against monitored resources and their associated + metadata. If a group's filter matches an available + monitored resource, then that resource is a member of + that group. Groups can contain any number of monitored + resources, and each monitored resource can be a member + of any number of groups. + + Groups can be nested in parent-child hierarchies. The + ``parentName`` field identifies an optional parent for + each group. If a group has a parent, then the only + monitored resources available to be matched by the + group's filter are the resources contained in the parent + group. In other words, a group contains the monitored + resources that match its filter and the filters of all + the group's ancestors. A group without a parent can + contain any monitored resource. + + For example, consider an infrastructure running a set of + instances with two user-defined tags: ``"environment"`` + and ``"role"``. A parent group has a filter, + ``environment="production"``. A child of that parent + group has a filter, ``role="transcoder"``. The parent + group contains all instances in the production + environment, regardless of their roles. The child group + contains instances that have the transcoder role *and* + are in the production environment. + + The monitored resources contained in a group can change + at any moment, depending on what resources exist and + what filters are associated with the group and its + ancestors. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a group_service.GetGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, group_service.GetGroupRequest): + request = group_service.GetGroupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_group] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_group( + self, + request: group_service.CreateGroupRequest = None, + *, + name: str = None, + group: gm_group.Group = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gm_group.Group: + r"""Creates a new group. + + Args: + request (:class:`~.group_service.CreateGroupRequest`): + The request object. The `CreateGroup` request. + name (:class:`str`): + Required. The project in which to create the group. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + group (:class:`~.gm_group.Group`): + Required. A group definition. It is an error to define + the ``name`` field because the system assigns the name. + This corresponds to the ``group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gm_group.Group: + The description of a dynamic collection of monitored + resources. Each group has a filter that is matched + against monitored resources and their associated + metadata. If a group's filter matches an available + monitored resource, then that resource is a member of + that group. Groups can contain any number of monitored + resources, and each monitored resource can be a member + of any number of groups. + + Groups can be nested in parent-child hierarchies. The + ``parentName`` field identifies an optional parent for + each group. If a group has a parent, then the only + monitored resources available to be matched by the + group's filter are the resources contained in the parent + group. In other words, a group contains the monitored + resources that match its filter and the filters of all + the group's ancestors. A group without a parent can + contain any monitored resource. + + For example, consider an infrastructure running a set of + instances with two user-defined tags: ``"environment"`` + and ``"role"``. A parent group has a filter, + ``environment="production"``. A child of that parent + group has a filter, ``role="transcoder"``. The parent + group contains all instances in the production + environment, regardless of their roles. The child group + contains instances that have the transcoder role *and* + are in the production environment. + + The monitored resources contained in a group can change + at any moment, depending on what resources exist and + what filters are associated with the group and its + ancestors. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, group]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a group_service.CreateGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, group_service.CreateGroupRequest): + request = group_service.CreateGroupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if group is not None: + request.group = group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_group] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_group( + self, + request: group_service.UpdateGroupRequest = None, + *, + group: gm_group.Group = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gm_group.Group: + r"""Updates an existing group. You can change any group attributes + except ``name``. + + Args: + request (:class:`~.group_service.UpdateGroupRequest`): + The request object. The `UpdateGroup` request. + group (:class:`~.gm_group.Group`): + Required. The new definition of the group. 
All fields of + the existing group, excepting ``name``, are replaced + with the corresponding fields of this group. + This corresponds to the ``group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gm_group.Group: + The description of a dynamic collection of monitored + resources. Each group has a filter that is matched + against monitored resources and their associated + metadata. If a group's filter matches an available + monitored resource, then that resource is a member of + that group. Groups can contain any number of monitored + resources, and each monitored resource can be a member + of any number of groups. + + Groups can be nested in parent-child hierarchies. The + ``parentName`` field identifies an optional parent for + each group. If a group has a parent, then the only + monitored resources available to be matched by the + group's filter are the resources contained in the parent + group. In other words, a group contains the monitored + resources that match its filter and the filters of all + the group's ancestors. A group without a parent can + contain any monitored resource. + + For example, consider an infrastructure running a set of + instances with two user-defined tags: ``"environment"`` + and ``"role"``. A parent group has a filter, + ``environment="production"``. A child of that parent + group has a filter, ``role="transcoder"``. The parent + group contains all instances in the production + environment, regardless of their roles. The child group + contains instances that have the transcoder role *and* + are in the production environment. 
+ + The monitored resources contained in a group can change + at any moment, depending on what resources exist and + what filters are associated with the group and its + ancestors. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([group]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a group_service.UpdateGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, group_service.UpdateGroupRequest): + request = group_service.UpdateGroupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if group is not None: + request.group = group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_group] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("group.name", request.group.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_group( + self, + request: group_service.DeleteGroupRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an existing group. + + Args: + request (:class:`~.group_service.DeleteGroupRequest`): + The request object. The `DeleteGroup` request. 
The + default behavior is to be able to delete a single group + without any descendants. + name (:class:`str`): + Required. The group to delete. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a group_service.DeleteGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, group_service.DeleteGroupRequest): + request = group_service.DeleteGroupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_group] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def list_group_members( + self, + request: group_service.ListGroupMembersRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListGroupMembersPager: + r"""Lists the monitored resources that are members of a + group. + + Args: + request (:class:`~.group_service.ListGroupMembersRequest`): + The request object. The `ListGroupMembers` request. + name (:class:`str`): + Required. The group whose members are listed. The format + is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListGroupMembersPager: + The ``ListGroupMembers`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a group_service.ListGroupMembersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, group_service.ListGroupMembersRequest): + request = group_service.ListGroupMembersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_group_members] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListGroupMembersPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("GroupServiceClient",) diff --git a/google/cloud/monitoring_v3/services/group_service/pagers.py b/google/cloud/monitoring_v3/services/group_service/pagers.py new file mode 100644 index 00000000..712775f0 --- /dev/null +++ b/google/cloud/monitoring_v3/services/group_service/pagers.py @@ -0,0 +1,278 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.monitoring_v3.types import group +from google.cloud.monitoring_v3.types import group_service + + +class ListGroupsPager: + """A pager for iterating through ``list_groups`` requests. + + This class thinly wraps an initial + :class:`~.group_service.ListGroupsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``group`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListGroups`` requests and continue to iterate + through the ``group`` field on the + corresponding responses. + + All the usual :class:`~.group_service.ListGroupsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., group_service.ListGroupsResponse], + request: group_service.ListGroupsRequest, + response: group_service.ListGroupsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.group_service.ListGroupsRequest`): + The initial request object. + response (:class:`~.group_service.ListGroupsResponse`): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = group_service.ListGroupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[group_service.ListGroupsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[group.Group]: + for page in self.pages: + yield from page.group + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListGroupsAsyncPager: + """A pager for iterating through ``list_groups`` requests. + + This class thinly wraps an initial + :class:`~.group_service.ListGroupsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``group`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListGroups`` requests and continue to iterate + through the ``group`` field on the + corresponding responses. + + All the usual :class:`~.group_service.ListGroupsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[group_service.ListGroupsResponse]], + request: group_service.ListGroupsRequest, + response: group_service.ListGroupsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.group_service.ListGroupsRequest`): + The initial request object. 
+ response (:class:`~.group_service.ListGroupsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = group_service.ListGroupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[group_service.ListGroupsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[group.Group]: + async def async_generator(): + async for page in self.pages: + for response in page.group: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListGroupMembersPager: + """A pager for iterating through ``list_group_members`` requests. + + This class thinly wraps an initial + :class:`~.group_service.ListGroupMembersResponse` object, and + provides an ``__iter__`` method to iterate through its + ``members`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListGroupMembers`` requests and continue to iterate + through the ``members`` field on the + corresponding responses. + + All the usual :class:`~.group_service.ListGroupMembersResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., group_service.ListGroupMembersResponse], + request: group_service.ListGroupMembersRequest, + response: group_service.ListGroupMembersResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.group_service.ListGroupMembersRequest`): + The initial request object. + response (:class:`~.group_service.ListGroupMembersResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = group_service.ListGroupMembersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[group_service.ListGroupMembersResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[monitored_resource.MonitoredResource]: + for page in self.pages: + yield from page.members + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListGroupMembersAsyncPager: + """A pager for iterating through ``list_group_members`` requests. + + This class thinly wraps an initial + :class:`~.group_service.ListGroupMembersResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``members`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListGroupMembers`` requests and continue to iterate + through the ``members`` field on the + corresponding responses. 
+ + All the usual :class:`~.group_service.ListGroupMembersResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[group_service.ListGroupMembersResponse]], + request: group_service.ListGroupMembersRequest, + response: group_service.ListGroupMembersResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.group_service.ListGroupMembersRequest`): + The initial request object. + response (:class:`~.group_service.ListGroupMembersResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = group_service.ListGroupMembersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[group_service.ListGroupMembersResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[monitored_resource.MonitoredResource]: + async def async_generator(): + async for page in self.pages: + for response in page.members: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/monitoring_v3/services/group_service/transports/__init__.py b/google/cloud/monitoring_v3/services/group_service/transports/__init__.py new file mode 100644 index 00000000..93adbcbe 
--- /dev/null +++ b/google/cloud/monitoring_v3/services/group_service/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import GroupServiceTransport +from .grpc import GroupServiceGrpcTransport +from .grpc_asyncio import GroupServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[GroupServiceTransport]] +_transport_registry["grpc"] = GroupServiceGrpcTransport +_transport_registry["grpc_asyncio"] = GroupServiceGrpcAsyncIOTransport + + +__all__ = ( + "GroupServiceTransport", + "GroupServiceGrpcTransport", + "GroupServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/monitoring_v3/services/group_service/transports/base.py b/google/cloud/monitoring_v3/services/group_service/transports/base.py new file mode 100644 index 00000000..1b2d543b --- /dev/null +++ b/google/cloud/monitoring_v3/services/group_service/transports/base.py @@ -0,0 +1,247 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.monitoring_v3.types import group +from google.cloud.monitoring_v3.types import group as gm_group +from google.cloud.monitoring_v3.types import group_service +from google.protobuf import empty_pb2 as empty # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class GroupServiceTransport(abc.ABC): + """Abstract transport class for GroupService.""" + + AUTH_SCOPES = ( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ) + + def __init__( + self, + *, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. 
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ":" not in host:
+            host += ":443"
+        self._host = host
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise exceptions.DuplicateCredentialArgs(
+                "'credentials_file' and 'credentials' are mutually exclusive"
+            )
+
+        if credentials_file is not None:
+            credentials, _ = auth.load_credentials_from_file(
+                credentials_file, scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        elif credentials is None:
+            credentials, _ = auth.default(
+                scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        # Save the credentials.
+        self._credentials = credentials
+
+        # Lifted into its own function so it can be stubbed out during tests.
+        self._prep_wrapped_messages(client_info)
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
+ self._wrapped_methods = { + self.list_groups: gapic_v1.method.wrap_method( + self.list_groups, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.get_group: gapic_v1.method.wrap_method( + self.get_group, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.create_group: gapic_v1.method.wrap_method( + self.create_group, default_timeout=30.0, client_info=client_info, + ), + self.update_group: gapic_v1.method.wrap_method( + self.update_group, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.delete_group: gapic_v1.method.wrap_method( + self.delete_group, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.list_group_members: gapic_v1.method.wrap_method( + self.list_group_members, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + } + + @property + def list_groups( + self, + ) -> typing.Callable[ + [group_service.ListGroupsRequest], + typing.Union[ + group_service.ListGroupsResponse, + typing.Awaitable[group_service.ListGroupsResponse], + ], + ]: + raise NotImplementedError() + 
+ @property + def get_group( + self, + ) -> typing.Callable[ + [group_service.GetGroupRequest], + typing.Union[group.Group, typing.Awaitable[group.Group]], + ]: + raise NotImplementedError() + + @property + def create_group( + self, + ) -> typing.Callable[ + [group_service.CreateGroupRequest], + typing.Union[gm_group.Group, typing.Awaitable[gm_group.Group]], + ]: + raise NotImplementedError() + + @property + def update_group( + self, + ) -> typing.Callable[ + [group_service.UpdateGroupRequest], + typing.Union[gm_group.Group, typing.Awaitable[gm_group.Group]], + ]: + raise NotImplementedError() + + @property + def delete_group( + self, + ) -> typing.Callable[ + [group_service.DeleteGroupRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def list_group_members( + self, + ) -> typing.Callable[ + [group_service.ListGroupMembersRequest], + typing.Union[ + group_service.ListGroupMembersResponse, + typing.Awaitable[group_service.ListGroupMembersResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("GroupServiceTransport",) diff --git a/google/cloud/monitoring_v3/services/group_service/transports/grpc.py b/google/cloud/monitoring_v3/services/group_service/transports/grpc.py new file mode 100644 index 00000000..c085fff0 --- /dev/null +++ b/google/cloud/monitoring_v3/services/group_service/transports/grpc.py @@ -0,0 +1,403 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.monitoring_v3.types import group +from google.cloud.monitoring_v3.types import group as gm_group +from google.cloud.monitoring_v3.types import group_service +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import GroupServiceTransport, DEFAULT_CLIENT_INFO + + +class GroupServiceGrpcTransport(GroupServiceTransport): + """gRPC backend transport for GroupService. + + The Group API lets you inspect and manage your + `groups <#google.monitoring.v3.Group>`__. + + A group is a named filter that is used to identify a collection of + monitored resources. Groups are typically used to mirror the + physical and/or logical topology of the environment. Because group + membership is computed dynamically, monitored resources that are + started in the future are automatically placed in matching groups. + By using a group to name monitored resources in, for example, an + alert policy, the target of that alert policy is updated + automatically as monitored resources are added and removed from the + infrastructure. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. 
The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_channel_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+
+        self._stubs = {}  # type: Dict[str, Callable]
+
+        # Run the base constructor.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+        )
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "monitoring.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+ """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def list_groups( + self, + ) -> Callable[[group_service.ListGroupsRequest], group_service.ListGroupsResponse]: + r"""Return a callable for the list groups method over gRPC. + + Lists the existing groups. + + Returns: + Callable[[~.ListGroupsRequest], + ~.ListGroupsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_groups" not in self._stubs: + self._stubs["list_groups"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.GroupService/ListGroups", + request_serializer=group_service.ListGroupsRequest.serialize, + response_deserializer=group_service.ListGroupsResponse.deserialize, + ) + return self._stubs["list_groups"] + + @property + def get_group(self) -> Callable[[group_service.GetGroupRequest], group.Group]: + r"""Return a callable for the get group method over gRPC. + + Gets a single group. + + Returns: + Callable[[~.GetGroupRequest], + ~.Group]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_group" not in self._stubs: + self._stubs["get_group"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.GroupService/GetGroup", + request_serializer=group_service.GetGroupRequest.serialize, + response_deserializer=group.Group.deserialize, + ) + return self._stubs["get_group"] + + @property + def create_group( + self, + ) -> Callable[[group_service.CreateGroupRequest], gm_group.Group]: + r"""Return a callable for the create group method over gRPC. + + Creates a new group. + + Returns: + Callable[[~.CreateGroupRequest], + ~.Group]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_group" not in self._stubs: + self._stubs["create_group"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.GroupService/CreateGroup", + request_serializer=group_service.CreateGroupRequest.serialize, + response_deserializer=gm_group.Group.deserialize, + ) + return self._stubs["create_group"] + + @property + def update_group( + self, + ) -> Callable[[group_service.UpdateGroupRequest], gm_group.Group]: + r"""Return a callable for the update group method over gRPC. + + Updates an existing group. You can change any group attributes + except ``name``. + + Returns: + Callable[[~.UpdateGroupRequest], + ~.Group]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_group" not in self._stubs: + self._stubs["update_group"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.GroupService/UpdateGroup", + request_serializer=group_service.UpdateGroupRequest.serialize, + response_deserializer=gm_group.Group.deserialize, + ) + return self._stubs["update_group"] + + @property + def delete_group(self) -> Callable[[group_service.DeleteGroupRequest], empty.Empty]: + r"""Return a callable for the delete group method over gRPC. + + Deletes an existing group. + + Returns: + Callable[[~.DeleteGroupRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_group" not in self._stubs: + self._stubs["delete_group"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.GroupService/DeleteGroup", + request_serializer=group_service.DeleteGroupRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_group"] + + @property + def list_group_members( + self, + ) -> Callable[ + [group_service.ListGroupMembersRequest], group_service.ListGroupMembersResponse + ]: + r"""Return a callable for the list group members method over gRPC. + + Lists the monitored resources that are members of a + group. + + Returns: + Callable[[~.ListGroupMembersRequest], + ~.ListGroupMembersResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_group_members" not in self._stubs: + self._stubs["list_group_members"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.GroupService/ListGroupMembers", + request_serializer=group_service.ListGroupMembersRequest.serialize, + response_deserializer=group_service.ListGroupMembersResponse.deserialize, + ) + return self._stubs["list_group_members"] + + +__all__ = ("GroupServiceGrpcTransport",) diff --git a/google/cloud/monitoring_v3/services/group_service/transports/grpc_asyncio.py b/google/cloud/monitoring_v3/services/group_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..1c66dd37 --- /dev/null +++ b/google/cloud/monitoring_v3/services/group_service/transports/grpc_asyncio.py @@ -0,0 +1,410 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.monitoring_v3.types import group +from google.cloud.monitoring_v3.types import group as gm_group +from google.cloud.monitoring_v3.types import group_service +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import GroupServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import GroupServiceGrpcTransport + + +class GroupServiceGrpcAsyncIOTransport(GroupServiceTransport): + """gRPC AsyncIO backend transport for GroupService. + + The Group API lets you inspect and manage your + `groups <#google.monitoring.v3.Group>`__. + + A group is a named filter that is used to identify a collection of + monitored resources. Groups are typically used to mirror the + physical and/or logical topology of the environment. Because group + membership is computed dynamically, monitored resources that are + started in the future are automatically placed in matching groups. + By using a group to name monitored resources in, for example, an + alert policy, the target of that alert policy is updated + automatically as monitored resources are added and removed from the + infrastructure. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+        elif api_mtls_endpoint:
+            warnings.warn(
+                "api_mtls_endpoint and client_cert_source are deprecated",
+                DeprecationWarning,
+            )
+
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def list_groups( + self, + ) -> Callable[ + [group_service.ListGroupsRequest], Awaitable[group_service.ListGroupsResponse] + ]: + r"""Return a callable for the list groups method over gRPC. + + Lists the existing groups. + + Returns: + Callable[[~.ListGroupsRequest], + Awaitable[~.ListGroupsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_groups" not in self._stubs: + self._stubs["list_groups"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.GroupService/ListGroups", + request_serializer=group_service.ListGroupsRequest.serialize, + response_deserializer=group_service.ListGroupsResponse.deserialize, + ) + return self._stubs["list_groups"] + + @property + def get_group( + self, + ) -> Callable[[group_service.GetGroupRequest], Awaitable[group.Group]]: + r"""Return a callable for the get group method over gRPC. + + Gets a single group. + + Returns: + Callable[[~.GetGroupRequest], + Awaitable[~.Group]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_group" not in self._stubs: + self._stubs["get_group"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.GroupService/GetGroup", + request_serializer=group_service.GetGroupRequest.serialize, + response_deserializer=group.Group.deserialize, + ) + return self._stubs["get_group"] + + @property + def create_group( + self, + ) -> Callable[[group_service.CreateGroupRequest], Awaitable[gm_group.Group]]: + r"""Return a callable for the create group method over gRPC. + + Creates a new group. + + Returns: + Callable[[~.CreateGroupRequest], + Awaitable[~.Group]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_group" not in self._stubs: + self._stubs["create_group"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.GroupService/CreateGroup", + request_serializer=group_service.CreateGroupRequest.serialize, + response_deserializer=gm_group.Group.deserialize, + ) + return self._stubs["create_group"] + + @property + def update_group( + self, + ) -> Callable[[group_service.UpdateGroupRequest], Awaitable[gm_group.Group]]: + r"""Return a callable for the update group method over gRPC. + + Updates an existing group. You can change any group attributes + except ``name``. + + Returns: + Callable[[~.UpdateGroupRequest], + Awaitable[~.Group]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_group" not in self._stubs: + self._stubs["update_group"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.GroupService/UpdateGroup", + request_serializer=group_service.UpdateGroupRequest.serialize, + response_deserializer=gm_group.Group.deserialize, + ) + return self._stubs["update_group"] + + @property + def delete_group( + self, + ) -> Callable[[group_service.DeleteGroupRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the delete group method over gRPC. + + Deletes an existing group. + + Returns: + Callable[[~.DeleteGroupRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_group" not in self._stubs: + self._stubs["delete_group"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.GroupService/DeleteGroup", + request_serializer=group_service.DeleteGroupRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_group"] + + @property + def list_group_members( + self, + ) -> Callable[ + [group_service.ListGroupMembersRequest], + Awaitable[group_service.ListGroupMembersResponse], + ]: + r"""Return a callable for the list group members method over gRPC. + + Lists the monitored resources that are members of a + group. + + Returns: + Callable[[~.ListGroupMembersRequest], + Awaitable[~.ListGroupMembersResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_group_members" not in self._stubs: + self._stubs["list_group_members"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.GroupService/ListGroupMembers", + request_serializer=group_service.ListGroupMembersRequest.serialize, + response_deserializer=group_service.ListGroupMembersResponse.deserialize, + ) + return self._stubs["list_group_members"] + + +__all__ = ("GroupServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/__init__.py b/google/cloud/monitoring_v3/services/metric_service/__init__.py similarity index 71% rename from google/cloud/__init__.py rename to google/cloud/monitoring_v3/services/metric_service/__init__.py index 9a1b64a6..5e9a2418 100644 --- a/google/cloud/__init__.py +++ b/google/cloud/monitoring_v3/services/metric_service/__init__.py @@ -1,24 +1,24 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil +from .client import MetricServiceClient +from .async_client import MetricServiceAsyncClient - __path__ = pkgutil.extend_path(__path__, __name__) +__all__ = ( + "MetricServiceClient", + "MetricServiceAsyncClient", +) diff --git a/google/cloud/monitoring_v3/services/metric_service/async_client.py b/google/cloud/monitoring_v3/services/metric_service/async_client.py new file mode 100644 index 00000000..08151a08 --- /dev/null +++ b/google/cloud/monitoring_v3/services/metric_service/async_client.py @@ -0,0 +1,955 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api import label_pb2 as label # type: ignore +from google.api import launch_stage_pb2 as launch_stage # type: ignore +from google.api import metric_pb2 as ga_metric # type: ignore +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.monitoring_v3.services.metric_service import pagers +from google.cloud.monitoring_v3.types import common +from google.cloud.monitoring_v3.types import metric as gm_metric +from google.cloud.monitoring_v3.types import metric_service + +from .transports.base import MetricServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import MetricServiceGrpcAsyncIOTransport +from .client import MetricServiceClient + + +class MetricServiceAsyncClient: + """Manages metric descriptors, monitored resource descriptors, + and time series data. 
+ """ + + _client: MetricServiceClient + + DEFAULT_ENDPOINT = MetricServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = MetricServiceClient.DEFAULT_MTLS_ENDPOINT + + common_project_path = staticmethod(MetricServiceClient.common_project_path) + parse_common_project_path = staticmethod( + MetricServiceClient.parse_common_project_path + ) + + common_organization_path = staticmethod( + MetricServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + MetricServiceClient.parse_common_organization_path + ) + + common_folder_path = staticmethod(MetricServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + MetricServiceClient.parse_common_folder_path + ) + + common_billing_account_path = staticmethod( + MetricServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + MetricServiceClient.parse_common_billing_account_path + ) + + common_location_path = staticmethod(MetricServiceClient.common_location_path) + parse_common_location_path = staticmethod( + MetricServiceClient.parse_common_location_path + ) + + from_service_account_file = MetricServiceClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(MetricServiceClient).get_transport_class, type(MetricServiceClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, MetricServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the metric service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ transport (Union[str, ~.MetricServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = MetricServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def list_monitored_resource_descriptors( + self, + request: metric_service.ListMonitoredResourceDescriptorsRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMonitoredResourceDescriptorsAsyncPager: + r"""Lists monitored resource descriptors that match a + filter. This method does not require a Workspace. + + Args: + request (:class:`~.metric_service.ListMonitoredResourceDescriptorsRequest`): + The request object. 
The + `ListMonitoredResourceDescriptors` request. + name (:class:`str`): + Required. The project on which to execute the request. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListMonitoredResourceDescriptorsAsyncPager: + The ``ListMonitoredResourceDescriptors`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metric_service.ListMonitoredResourceDescriptorsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_monitored_resource_descriptors, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListMonitoredResourceDescriptorsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_monitored_resource_descriptor( + self, + request: metric_service.GetMonitoredResourceDescriptorRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> monitored_resource.MonitoredResourceDescriptor: + r"""Gets a single monitored resource descriptor. This + method does not require a Workspace. + + Args: + request (:class:`~.metric_service.GetMonitoredResourceDescriptorRequest`): + The request object. The `GetMonitoredResourceDescriptor` + request. + name (:class:`str`): + Required. The monitored resource descriptor to get. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE] + + The ``[RESOURCE_TYPE]`` is a predefined type, such as + ``cloudsql_database``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.monitored_resource.MonitoredResourceDescriptor: + An object that describes the schema of a + [MonitoredResource][google.api.MonitoredResource] object + using a type name and a set of labels. 
For example, the
+ monitored resource descriptor for Google Compute Engine
+ VM instances has a type of ``"gce_instance"`` and
+ specifies the use of the labels ``"instance_id"`` and
+ ``"zone"`` to identify particular VM instances.
+
+ Different services can support different monitored
+ resource types.
+
+ The following are specific rules to service defined
+ monitored resources for Monitoring and Logging:
+
+ - The ``type``, ``display_name``, ``description``,
+ ``labels`` and ``launch_stage`` fields are all
+ required.
+ - The first label of the monitored resource descriptor
+ must be ``resource_container``. There are legacy
+ monitored resource descriptors that start with
+ ``project_id``.
+ - It must include a ``location`` label.
+ - Maximum of default 5 service defined monitored
+ resource descriptors is allowed per service.
+ - Maximum of default 10 labels per monitored resource
+ is allowed.
+
+ The default maximum limit can be overridden. Please
+ follow https://cloud.google.com/monitoring/quotas
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ if request is not None and any([name]):
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = metric_service.GetMonitoredResourceDescriptorRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_monitored_resource_descriptor, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_metric_descriptors( + self, + request: metric_service.ListMetricDescriptorsRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetricDescriptorsAsyncPager: + r"""Lists metric descriptors that match a filter. This + method does not require a Workspace. + + Args: + request (:class:`~.metric_service.ListMetricDescriptorsRequest`): + The request object. The `ListMetricDescriptors` request. + name (:class:`str`): + Required. The project on which to execute the request. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListMetricDescriptorsAsyncPager: + The ``ListMetricDescriptors`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metric_service.ListMetricDescriptorsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_metric_descriptors, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListMetricDescriptorsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_metric_descriptor( + self, + request: metric_service.GetMetricDescriptorRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> ga_metric.MetricDescriptor: + r"""Gets a single metric descriptor. This method does not + require a Workspace. 
+ + Args: + request (:class:`~.metric_service.GetMetricDescriptorRequest`): + The request object. The `GetMetricDescriptor` request. + name (:class:`str`): + Required. The metric descriptor on which to execute the + request. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] + + An example value of ``[METRIC_ID]`` is + ``"compute.googleapis.com/instance/disk/read_bytes_count"``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.ga_metric.MetricDescriptor: + Defines a metric type and its schema. Once a metric + descriptor is created, deleting or altering it stops + data collection and makes the metric type's existing + data unusable. + + The following are specific rules for service defined + Monitoring metric descriptors: + + - ``type``, ``metric_kind``, ``value_type``, + ``description``, ``display_name``, ``launch_stage`` + fields are all required. The ``unit`` field must be + specified if the ``value_type`` is any of DOUBLE, + INT64, DISTRIBUTION. + - Maximum of default 500 metric descriptors per service + is allowed. + - Maximum of default 10 labels per metric descriptor is + allowed. + + The default maximum limit can be overridden. Please + follow https://cloud.google.com/monitoring/quotas + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = metric_service.GetMetricDescriptorRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_metric_descriptor, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def create_metric_descriptor( + self, + request: metric_service.CreateMetricDescriptorRequest = None, + *, + name: str = None, + metric_descriptor: ga_metric.MetricDescriptor = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> ga_metric.MetricDescriptor: + r"""Creates a new metric descriptor. User-created metric descriptors + define `custom + metrics `__. + + Args: + request (:class:`~.metric_service.CreateMetricDescriptorRequest`): + The request object. The `CreateMetricDescriptor` + request. + name (:class:`str`): + Required. The project on which to execute the request. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metric_descriptor (:class:`~.ga_metric.MetricDescriptor`): + Required. The new `custom + metric `__ + descriptor. 
+ This corresponds to the ``metric_descriptor`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.ga_metric.MetricDescriptor: + Defines a metric type and its schema. Once a metric + descriptor is created, deleting or altering it stops + data collection and makes the metric type's existing + data unusable. + + The following are specific rules for service defined + Monitoring metric descriptors: + + - ``type``, ``metric_kind``, ``value_type``, + ``description``, ``display_name``, ``launch_stage`` + fields are all required. The ``unit`` field must be + specified if the ``value_type`` is any of DOUBLE, + INT64, DISTRIBUTION. + - Maximum of default 500 metric descriptors per service + is allowed. + - Maximum of default 10 labels per metric descriptor is + allowed. + + The default maximum limit can be overridden. Please + follow https://cloud.google.com/monitoring/quotas + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name, metric_descriptor]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metric_service.CreateMetricDescriptorRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if metric_descriptor is not None: + request.metric_descriptor = metric_descriptor + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_metric_descriptor, + default_timeout=12.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_metric_descriptor( + self, + request: metric_service.DeleteMetricDescriptorRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a metric descriptor. Only user-created `custom + metrics `__ + can be deleted. + + Args: + request (:class:`~.metric_service.DeleteMetricDescriptorRequest`): + The request object. The `DeleteMetricDescriptor` + request. + name (:class:`str`): + Required. The metric descriptor on which to execute the + request. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] + + An example of ``[METRIC_ID]`` is: + ``"custom.googleapis.com/my_test_metric"``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metric_service.DeleteMetricDescriptorRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_metric_descriptor, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def list_time_series( + self, + request: metric_service.ListTimeSeriesRequest = None, + *, + name: str = None, + filter: str = None, + interval: common.TimeInterval = None, + view: metric_service.ListTimeSeriesRequest.TimeSeriesView = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTimeSeriesAsyncPager: + r"""Lists time series that match a filter. This method + does not require a Workspace. + + Args: + request (:class:`~.metric_service.ListTimeSeriesRequest`): + The request object. The `ListTimeSeries` request. + name (:class:`str`): + Required. The project on which to execute the request. 
+ The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (:class:`str`): + Required. A `monitoring + filter `__ + that specifies which time series should be returned. The + filter must specify a single metric type, and can + additionally specify metric labels and other + information. For example: + + :: + + metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND + metric.labels.instance_name = "my-instance-name". + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + interval (:class:`~.common.TimeInterval`): + Required. The time interval for which + results should be returned. Only time + series that contain data points in the + specified interval are included in the + response. + This corresponds to the ``interval`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + view (:class:`~.metric_service.ListTimeSeriesRequest.TimeSeriesView`): + Required. Specifies which information + is returned about the time series. + This corresponds to the ``view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListTimeSeriesAsyncPager: + The ``ListTimeSeries`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any([name, filter, interval, view]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metric_service.ListTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if filter is not None: + request.filter = filter + if interval is not None: + request.interval = interval + if view is not None: + request.view = view + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_time_series, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTimeSeriesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_time_series( + self, + request: metric_service.CreateTimeSeriesRequest = None, + *, + name: str = None, + time_series: Sequence[gm_metric.TimeSeries] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Creates or adds data to one or more time series. 
+ The response is empty if all time series in the request + were written. If any time series could not be written, a + corresponding failure message is included in the error + response. + + Args: + request (:class:`~.metric_service.CreateTimeSeriesRequest`): + The request object. The `CreateTimeSeries` request. + name (:class:`str`): + Required. The project on which to execute the request. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + time_series (:class:`Sequence[~.gm_metric.TimeSeries]`): + Required. The new data to be added to a list of time + series. Adds at most one data point to each of several + time series. The new data point must be more recent than + any other point in its time series. Each ``TimeSeries`` + value must fully specify a unique time series by + supplying all label values for the metric and the + monitored resource. + + The maximum number of ``TimeSeries`` objects per + ``Create`` request is 200. + This corresponds to the ``time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name, time_series]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metric_service.CreateTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if name is not None: + request.name = name + if time_series is not None: + request.time_series = time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_time_series, + default_timeout=12.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("MetricServiceAsyncClient",) diff --git a/google/cloud/monitoring_v3/services/metric_service/client.py b/google/cloud/monitoring_v3/services/metric_service/client.py new file mode 100644 index 00000000..68645bff --- /dev/null +++ b/google/cloud/monitoring_v3/services/metric_service/client.py @@ -0,0 +1,1108 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api import label_pb2 as label # type: ignore +from google.api import launch_stage_pb2 as launch_stage # type: ignore +from google.api import metric_pb2 as ga_metric # type: ignore +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.monitoring_v3.services.metric_service import pagers +from google.cloud.monitoring_v3.types import common +from google.cloud.monitoring_v3.types import metric as gm_metric +from google.cloud.monitoring_v3.types import metric_service + +from .transports.base import MetricServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import MetricServiceGrpcTransport +from .transports.grpc_asyncio import MetricServiceGrpcAsyncIOTransport + + +class MetricServiceClientMeta(type): + """Metaclass for the MetricService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + + _transport_registry = OrderedDict() # type: Dict[str, Type[MetricServiceTransport]] + _transport_registry["grpc"] = MetricServiceGrpcTransport + _transport_registry["grpc_asyncio"] = MetricServiceGrpcAsyncIOTransport + + def get_transport_class(cls, label: str = None,) -> Type[MetricServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class MetricServiceClient(metaclass=MetricServiceClientMeta): + """Manages metric descriptors, monitored resource descriptors, + and time series data. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "monitoring.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            {@api.name}: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str,) -> str:
+        """Return a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str,) -> str:
+        """Return a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder,)
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def 
common_organization_path(organization: str,) -> str:
+        """Return a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization,)
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str,) -> str:
+        """Return a fully-qualified project string."""
+        return "projects/{project}".format(project=project,)
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str,) -> str:
+        """Return a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project, location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(
+        self,
+        *,
+        credentials: Optional[credentials.Credentials] = None,
+        transport: Union[str, MetricServiceTransport, None] = None,
+        client_options: Optional[client_options_lib.ClientOptions] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the metric service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.MetricServiceTransport]): The
+                transport to use. 
If set to None, a transport is chosen + automatically. + client_options (client_options_lib.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, MetricServiceTransport): + # transport is a MetricServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def list_monitored_resource_descriptors( + self, + request: metric_service.ListMonitoredResourceDescriptorsRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMonitoredResourceDescriptorsPager: + r"""Lists monitored resource descriptors that match a + filter. This method does not require a Workspace. + + Args: + request (:class:`~.metric_service.ListMonitoredResourceDescriptorsRequest`): + The request object. The + `ListMonitoredResourceDescriptors` request. + name (:class:`str`): + Required. The project on which to execute the request. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListMonitoredResourceDescriptorsPager: + The ``ListMonitoredResourceDescriptors`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metric_service.ListMonitoredResourceDescriptorsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, metric_service.ListMonitoredResourceDescriptorsRequest + ): + request = metric_service.ListMonitoredResourceDescriptorsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_monitored_resource_descriptors + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListMonitoredResourceDescriptorsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def get_monitored_resource_descriptor( + self, + request: metric_service.GetMonitoredResourceDescriptorRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> monitored_resource.MonitoredResourceDescriptor: + r"""Gets a single monitored resource descriptor. This + method does not require a Workspace. 
+ + Args: + request (:class:`~.metric_service.GetMonitoredResourceDescriptorRequest`): + The request object. The `GetMonitoredResourceDescriptor` + request. + name (:class:`str`): + Required. The monitored resource descriptor to get. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE] + + The ``[RESOURCE_TYPE]`` is a predefined type, such as + ``cloudsql_database``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.monitored_resource.MonitoredResourceDescriptor: + An object that describes the schema of a + [MonitoredResource][google.api.MonitoredResource] object + using a type name and a set of labels. For example, the + monitored resource descriptor for Google Compute Engine + VM instances has a type of ``"gce_instance"`` and + specifies the use of the labels ``"instance_id"`` and + ``"zone"`` to identify particular VM instances. + + Different services can support different monitored + resource types. + + The following are specific rules to service defined + monitored resources for Monitoring and Logging: + + - The ``type``, ``display_name``, ``description``, + ``labels`` and ``launch_stage`` fields are all + required. + - The first label of the monitored resource descriptor + must be ``resource_container``. There are legacy + monitored resource descritptors start with + ``project_id``. + - It must include a ``location`` label. + - Maximum of default 5 service defined monitored + resource descriptors is allowed per service. + - Maximum of default 10 labels per monitored resource + is allowed. + + The default maximum limit can be overridden. 
Please + follow https://cloud.google.com/monitoring/quotas + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metric_service.GetMonitoredResourceDescriptorRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, metric_service.GetMonitoredResourceDescriptorRequest + ): + request = metric_service.GetMonitoredResourceDescriptorRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.get_monitored_resource_descriptor + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_metric_descriptors( + self, + request: metric_service.ListMetricDescriptorsRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetricDescriptorsPager: + r"""Lists metric descriptors that match a filter. This + method does not require a Workspace. 
+ + Args: + request (:class:`~.metric_service.ListMetricDescriptorsRequest`): + The request object. The `ListMetricDescriptors` request. + name (:class:`str`): + Required. The project on which to execute the request. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListMetricDescriptorsPager: + The ``ListMetricDescriptors`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metric_service.ListMetricDescriptorsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metric_service.ListMetricDescriptorsRequest): + request = metric_service.ListMetricDescriptorsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.list_metric_descriptors] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListMetricDescriptorsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def get_metric_descriptor( + self, + request: metric_service.GetMetricDescriptorRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> ga_metric.MetricDescriptor: + r"""Gets a single metric descriptor. This method does not + require a Workspace. + + Args: + request (:class:`~.metric_service.GetMetricDescriptorRequest`): + The request object. The `GetMetricDescriptor` request. + name (:class:`str`): + Required. The metric descriptor on which to execute the + request. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] + + An example value of ``[METRIC_ID]`` is + ``"compute.googleapis.com/instance/disk/read_bytes_count"``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.ga_metric.MetricDescriptor: + Defines a metric type and its schema. 
Once a metric + descriptor is created, deleting or altering it stops + data collection and makes the metric type's existing + data unusable. + + The following are specific rules for service defined + Monitoring metric descriptors: + + - ``type``, ``metric_kind``, ``value_type``, + ``description``, ``display_name``, ``launch_stage`` + fields are all required. The ``unit`` field must be + specified if the ``value_type`` is any of DOUBLE, + INT64, DISTRIBUTION. + - Maximum of default 500 metric descriptors per service + is allowed. + - Maximum of default 10 labels per metric descriptor is + allowed. + + The default maximum limit can be overridden. Please + follow https://cloud.google.com/monitoring/quotas + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metric_service.GetMetricDescriptorRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metric_service.GetMetricDescriptorRequest): + request = metric_service.GetMetricDescriptorRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_metric_descriptor] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_metric_descriptor( + self, + request: metric_service.CreateMetricDescriptorRequest = None, + *, + name: str = None, + metric_descriptor: ga_metric.MetricDescriptor = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> ga_metric.MetricDescriptor: + r"""Creates a new metric descriptor. User-created metric descriptors + define `custom + metrics `__. + + Args: + request (:class:`~.metric_service.CreateMetricDescriptorRequest`): + The request object. The `CreateMetricDescriptor` + request. + name (:class:`str`): + Required. The project on which to execute the request. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metric_descriptor (:class:`~.ga_metric.MetricDescriptor`): + Required. The new `custom + metric `__ + descriptor. + This corresponds to the ``metric_descriptor`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.ga_metric.MetricDescriptor: + Defines a metric type and its schema. Once a metric + descriptor is created, deleting or altering it stops + data collection and makes the metric type's existing + data unusable. 
+ + The following are specific rules for service defined + Monitoring metric descriptors: + + - ``type``, ``metric_kind``, ``value_type``, + ``description``, ``display_name``, ``launch_stage`` + fields are all required. The ``unit`` field must be + specified if the ``value_type`` is any of DOUBLE, + INT64, DISTRIBUTION. + - Maximum of default 500 metric descriptors per service + is allowed. + - Maximum of default 10 labels per metric descriptor is + allowed. + + The default maximum limit can be overridden. Please + follow https://cloud.google.com/monitoring/quotas + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, metric_descriptor]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metric_service.CreateMetricDescriptorRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metric_service.CreateMetricDescriptorRequest): + request = metric_service.CreateMetricDescriptorRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if metric_descriptor is not None: + request.metric_descriptor = metric_descriptor + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_metric_descriptor] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    def delete_metric_descriptor(
+        self,
+        request: metric_service.DeleteMetricDescriptorRequest = None,
+        *,
+        name: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> None:
+        r"""Deletes a metric descriptor. Only user-created `custom
+        metrics <https://cloud.google.com/monitoring/custom-metrics>`__
+        can be deleted.
+
+        Args:
+            request (:class:`~.metric_service.DeleteMetricDescriptorRequest`):
+                The request object. The `DeleteMetricDescriptor`
+                request.
+            name (:class:`str`):
+                Required. The metric descriptor on which to execute the
+                request. The format is:
+
+                ::
+
+                    projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID]
+
+                An example of ``[METRIC_ID]`` is:
+                ``"custom.googleapis.com/my_test_metric"``.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a metric_service.DeleteMetricDescriptorRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, metric_service.DeleteMetricDescriptorRequest):
+            request = metric_service.DeleteMetricDescriptorRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.delete_metric_descriptor]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,
+        )
+
+    def list_time_series(
+        self,
+        request: metric_service.ListTimeSeriesRequest = None,
+        *,
+        name: str = None,
+        filter: str = None,
+        interval: common.TimeInterval = None,
+        view: metric_service.ListTimeSeriesRequest.TimeSeriesView = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> pagers.ListTimeSeriesPager:
+        r"""Lists time series that match a filter. This method
+        does not require a Workspace.
+
+        Args:
+            request (:class:`~.metric_service.ListTimeSeriesRequest`):
+                The request object. The `ListTimeSeries` request.
+            name (:class:`str`):
+                Required. The project on which to execute the request.
+                The format is:
+
+                ::
+
+                    projects/[PROJECT_ID_OR_NUMBER]
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            filter (:class:`str`):
+                Required. A `monitoring
+                filter <https://cloud.google.com/monitoring/api/v3/filters>`__
+                that specifies which time series should be returned. The
+                filter must specify a single metric type, and can
+                additionally specify metric labels and other
+                information. For example:
+
+                ::
+
+                    metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND
+                        metric.labels.instance_name = "my-instance-name".
+                This corresponds to the ``filter`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            interval (:class:`~.common.TimeInterval`):
+                Required. The time interval for which
+                results should be returned. Only time
+                series that contain data points in the
+                specified interval are included in the
+                response.
+                This corresponds to the ``interval`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            view (:class:`~.metric_service.ListTimeSeriesRequest.TimeSeriesView`):
+                Required. Specifies which information
+                is returned about the time series.
+                This corresponds to the ``view`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.pagers.ListTimeSeriesPager:
+                The ``ListTimeSeries`` response.
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name, filter, interval, view])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a metric_service.ListTimeSeriesRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+ if not isinstance(request, metric_service.ListTimeSeriesRequest): + request = metric_service.ListTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if filter is not None: + request.filter = filter + if interval is not None: + request.interval = interval + if view is not None: + request.view = view + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_time_series] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTimeSeriesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def create_time_series( + self, + request: metric_service.CreateTimeSeriesRequest = None, + *, + name: str = None, + time_series: Sequence[gm_metric.TimeSeries] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Creates or adds data to one or more time series. + The response is empty if all time series in the request + were written. If any time series could not be written, a + corresponding failure message is included in the error + response. + + Args: + request (:class:`~.metric_service.CreateTimeSeriesRequest`): + The request object. The `CreateTimeSeries` request. + name (:class:`str`): + Required. The project on which to execute the request. 
+ The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + time_series (:class:`Sequence[~.gm_metric.TimeSeries]`): + Required. The new data to be added to a list of time + series. Adds at most one data point to each of several + time series. The new data point must be more recent than + any other point in its time series. Each ``TimeSeries`` + value must fully specify a unique time series by + supplying all label values for the metric and the + monitored resource. + + The maximum number of ``TimeSeries`` objects per + ``Create`` request is 200. + This corresponds to the ``time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, time_series]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metric_service.CreateTimeSeriesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metric_service.CreateTimeSeriesRequest): + request = metric_service.CreateTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if name is not None: + request.name = name + + if time_series: + request.time_series.extend(time_series) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_time_series] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("MetricServiceClient",) diff --git a/google/cloud/monitoring_v3/services/metric_service/pagers.py b/google/cloud/monitoring_v3/services/metric_service/pagers.py new file mode 100644 index 00000000..d93703ec --- /dev/null +++ b/google/cloud/monitoring_v3/services/metric_service/pagers.py @@ -0,0 +1,417 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.api import metric_pb2 as ga_metric # type: ignore +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.monitoring_v3.types import metric as gm_metric +from google.cloud.monitoring_v3.types import metric_service + + +class ListMonitoredResourceDescriptorsPager: + """A pager for iterating through ``list_monitored_resource_descriptors`` requests. + + This class thinly wraps an initial + :class:`~.metric_service.ListMonitoredResourceDescriptorsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``resource_descriptors`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListMonitoredResourceDescriptors`` requests and continue to iterate + through the ``resource_descriptors`` field on the + corresponding responses. + + All the usual :class:`~.metric_service.ListMonitoredResourceDescriptorsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., metric_service.ListMonitoredResourceDescriptorsResponse], + request: metric_service.ListMonitoredResourceDescriptorsRequest, + response: metric_service.ListMonitoredResourceDescriptorsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.metric_service.ListMonitoredResourceDescriptorsRequest`): + The initial request object. + response (:class:`~.metric_service.ListMonitoredResourceDescriptorsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = metric_service.ListMonitoredResourceDescriptorsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages( + self, + ) -> Iterable[metric_service.ListMonitoredResourceDescriptorsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[monitored_resource.MonitoredResourceDescriptor]: + for page in self.pages: + yield from page.resource_descriptors + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListMonitoredResourceDescriptorsAsyncPager: + """A pager for iterating through ``list_monitored_resource_descriptors`` requests. + + This class thinly wraps an initial + :class:`~.metric_service.ListMonitoredResourceDescriptorsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``resource_descriptors`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListMonitoredResourceDescriptors`` requests and continue to iterate + through the ``resource_descriptors`` field on the + corresponding responses. + + All the usual :class:`~.metric_service.ListMonitoredResourceDescriptorsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[metric_service.ListMonitoredResourceDescriptorsResponse] + ], + request: metric_service.ListMonitoredResourceDescriptorsRequest, + response: metric_service.ListMonitoredResourceDescriptorsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.metric_service.ListMonitoredResourceDescriptorsRequest`): + The initial request object. + response (:class:`~.metric_service.ListMonitoredResourceDescriptorsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metric_service.ListMonitoredResourceDescriptorsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[metric_service.ListMonitoredResourceDescriptorsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__( + self, + ) -> AsyncIterable[monitored_resource.MonitoredResourceDescriptor]: + async def async_generator(): + async for page in self.pages: + for response in page.resource_descriptors: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListMetricDescriptorsPager: + """A pager for iterating through ``list_metric_descriptors`` requests. + + This class thinly wraps an initial + :class:`~.metric_service.ListMetricDescriptorsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``metric_descriptors`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListMetricDescriptors`` requests and continue to iterate + through the ``metric_descriptors`` field on the + corresponding responses. 
+ + All the usual :class:`~.metric_service.ListMetricDescriptorsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., metric_service.ListMetricDescriptorsResponse], + request: metric_service.ListMetricDescriptorsRequest, + response: metric_service.ListMetricDescriptorsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.metric_service.ListMetricDescriptorsRequest`): + The initial request object. + response (:class:`~.metric_service.ListMetricDescriptorsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metric_service.ListMetricDescriptorsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[metric_service.ListMetricDescriptorsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[ga_metric.MetricDescriptor]: + for page in self.pages: + yield from page.metric_descriptors + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListMetricDescriptorsAsyncPager: + """A pager for iterating through ``list_metric_descriptors`` requests. 
+ + This class thinly wraps an initial + :class:`~.metric_service.ListMetricDescriptorsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``metric_descriptors`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListMetricDescriptors`` requests and continue to iterate + through the ``metric_descriptors`` field on the + corresponding responses. + + All the usual :class:`~.metric_service.ListMetricDescriptorsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[metric_service.ListMetricDescriptorsResponse]], + request: metric_service.ListMetricDescriptorsRequest, + response: metric_service.ListMetricDescriptorsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.metric_service.ListMetricDescriptorsRequest`): + The initial request object. + response (:class:`~.metric_service.ListMetricDescriptorsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = metric_service.ListMetricDescriptorsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[metric_service.ListMetricDescriptorsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[ga_metric.MetricDescriptor]: + async def async_generator(): + async for page in self.pages: + for response in page.metric_descriptors: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTimeSeriesPager: + """A pager for iterating through ``list_time_series`` requests. + + This class thinly wraps an initial + :class:`~.metric_service.ListTimeSeriesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``time_series`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTimeSeries`` requests and continue to iterate + through the ``time_series`` field on the + corresponding responses. + + All the usual :class:`~.metric_service.ListTimeSeriesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., metric_service.ListTimeSeriesResponse], + request: metric_service.ListTimeSeriesRequest, + response: metric_service.ListTimeSeriesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (:class:`~.metric_service.ListTimeSeriesRequest`): + The initial request object. + response (:class:`~.metric_service.ListTimeSeriesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metric_service.ListTimeSeriesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[metric_service.ListTimeSeriesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[gm_metric.TimeSeries]: + for page in self.pages: + yield from page.time_series + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTimeSeriesAsyncPager: + """A pager for iterating through ``list_time_series`` requests. + + This class thinly wraps an initial + :class:`~.metric_service.ListTimeSeriesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``time_series`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTimeSeries`` requests and continue to iterate + through the ``time_series`` field on the + corresponding responses. + + All the usual :class:`~.metric_service.ListTimeSeriesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., Awaitable[metric_service.ListTimeSeriesResponse]], + request: metric_service.ListTimeSeriesRequest, + response: metric_service.ListTimeSeriesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.metric_service.ListTimeSeriesRequest`): + The initial request object. + response (:class:`~.metric_service.ListTimeSeriesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metric_service.ListTimeSeriesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[metric_service.ListTimeSeriesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[gm_metric.TimeSeries]: + async def async_generator(): + async for page in self.pages: + for response in page.time_series: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/monitoring_v3/services/metric_service/transports/__init__.py b/google/cloud/monitoring_v3/services/metric_service/transports/__init__.py new file mode 100644 index 00000000..1fd84db5 --- /dev/null +++ b/google/cloud/monitoring_v3/services/metric_service/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import MetricServiceTransport +from .grpc import MetricServiceGrpcTransport +from .grpc_asyncio import MetricServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[MetricServiceTransport]] +_transport_registry["grpc"] = MetricServiceGrpcTransport +_transport_registry["grpc_asyncio"] = MetricServiceGrpcAsyncIOTransport + + +__all__ = ( + "MetricServiceTransport", + "MetricServiceGrpcTransport", + "MetricServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/monitoring_v3/services/metric_service/transports/base.py b/google/cloud/monitoring_v3/services/metric_service/transports/base.py new file mode 100644 index 00000000..8d058bc6 --- /dev/null +++ b/google/cloud/monitoring_v3/services/metric_service/transports/base.py @@ -0,0 +1,294 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore + +from google.api import metric_pb2 as ga_metric # type: ignore +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.monitoring_v3.types import metric_service +from google.protobuf import empty_pb2 as empty # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class MetricServiceTransport(abc.ABC): + """Abstract transport class for MetricService.""" + + AUTH_SCOPES = ( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + ) + + def __init__( + self, + *, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.list_monitored_resource_descriptors: gapic_v1.method.wrap_method( + self.list_monitored_resource_descriptors, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.get_monitored_resource_descriptor: gapic_v1.method.wrap_method( + self.get_monitored_resource_descriptor, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.list_metric_descriptors: gapic_v1.method.wrap_method( + self.list_metric_descriptors, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.get_metric_descriptor: gapic_v1.method.wrap_method( + self.get_metric_descriptor, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.create_metric_descriptor: gapic_v1.method.wrap_method( + self.create_metric_descriptor, + default_timeout=12.0, + client_info=client_info, + ), + self.delete_metric_descriptor: gapic_v1.method.wrap_method( + self.delete_metric_descriptor, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.list_time_series: gapic_v1.method.wrap_method( + self.list_time_series, 
+ default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.create_time_series: gapic_v1.method.wrap_method( + self.create_time_series, default_timeout=12.0, client_info=client_info, + ), + } + + @property + def list_monitored_resource_descriptors( + self, + ) -> typing.Callable[ + [metric_service.ListMonitoredResourceDescriptorsRequest], + typing.Union[ + metric_service.ListMonitoredResourceDescriptorsResponse, + typing.Awaitable[metric_service.ListMonitoredResourceDescriptorsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_monitored_resource_descriptor( + self, + ) -> typing.Callable[ + [metric_service.GetMonitoredResourceDescriptorRequest], + typing.Union[ + monitored_resource.MonitoredResourceDescriptor, + typing.Awaitable[monitored_resource.MonitoredResourceDescriptor], + ], + ]: + raise NotImplementedError() + + @property + def list_metric_descriptors( + self, + ) -> typing.Callable[ + [metric_service.ListMetricDescriptorsRequest], + typing.Union[ + metric_service.ListMetricDescriptorsResponse, + typing.Awaitable[metric_service.ListMetricDescriptorsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_metric_descriptor( + self, + ) -> typing.Callable[ + [metric_service.GetMetricDescriptorRequest], + typing.Union[ + ga_metric.MetricDescriptor, typing.Awaitable[ga_metric.MetricDescriptor] + ], + ]: + raise NotImplementedError() + + @property + def create_metric_descriptor( + self, + ) -> typing.Callable[ + [metric_service.CreateMetricDescriptorRequest], + typing.Union[ + ga_metric.MetricDescriptor, typing.Awaitable[ga_metric.MetricDescriptor] + ], + ]: + raise NotImplementedError() + + @property + def delete_metric_descriptor( + self, + ) -> typing.Callable[ + [metric_service.DeleteMetricDescriptorRequest], + 
typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def list_time_series( + self, + ) -> typing.Callable[ + [metric_service.ListTimeSeriesRequest], + typing.Union[ + metric_service.ListTimeSeriesResponse, + typing.Awaitable[metric_service.ListTimeSeriesResponse], + ], + ]: + raise NotImplementedError() + + @property + def create_time_series( + self, + ) -> typing.Callable[ + [metric_service.CreateTimeSeriesRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + +__all__ = ("MetricServiceTransport",) diff --git a/google/cloud/monitoring_v3/services/metric_service/transports/grpc.py b/google/cloud/monitoring_v3/services/metric_service/transports/grpc.py new file mode 100644 index 00000000..914057fe --- /dev/null +++ b/google/cloud/monitoring_v3/services/metric_service/transports/grpc.py @@ -0,0 +1,479 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.api import metric_pb2 as ga_metric # type: ignore +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.monitoring_v3.types import metric_service +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import MetricServiceTransport, DEFAULT_CLIENT_INFO + + +class MetricServiceGrpcTransport(MetricServiceTransport): + """gRPC backend transport for MetricService. + + Manages metric descriptors, monitored resource descriptors, + and time series data. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+              creation failed for any reason.
+          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+              and ``credentials_file`` are passed.
+        """
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+ credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. 
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+        )
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "monitoring.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            address (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Create the channel designed to connect to this service.
+ + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def list_monitored_resource_descriptors( + self, + ) -> Callable[ + [metric_service.ListMonitoredResourceDescriptorsRequest], + metric_service.ListMonitoredResourceDescriptorsResponse, + ]: + r"""Return a callable for the list monitored resource + descriptors method over gRPC. + + Lists monitored resource descriptors that match a + filter. This method does not require a Workspace. + + Returns: + Callable[[~.ListMonitoredResourceDescriptorsRequest], + ~.ListMonitoredResourceDescriptorsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_monitored_resource_descriptors" not in self._stubs: + self._stubs[ + "list_monitored_resource_descriptors" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", + request_serializer=metric_service.ListMonitoredResourceDescriptorsRequest.serialize, + response_deserializer=metric_service.ListMonitoredResourceDescriptorsResponse.deserialize, + ) + return self._stubs["list_monitored_resource_descriptors"] + + @property + def get_monitored_resource_descriptor( + self, + ) -> Callable[ + [metric_service.GetMonitoredResourceDescriptorRequest], + monitored_resource.MonitoredResourceDescriptor, + ]: + r"""Return a callable for the get monitored resource + descriptor method over gRPC. + + Gets a single monitored resource descriptor. This + method does not require a Workspace. + + Returns: + Callable[[~.GetMonitoredResourceDescriptorRequest], + ~.MonitoredResourceDescriptor]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_monitored_resource_descriptor" not in self._stubs: + self._stubs[ + "get_monitored_resource_descriptor" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", + request_serializer=metric_service.GetMonitoredResourceDescriptorRequest.serialize, + response_deserializer=monitored_resource.MonitoredResourceDescriptor.FromString, + ) + return self._stubs["get_monitored_resource_descriptor"] + + @property + def list_metric_descriptors( + self, + ) -> Callable[ + [metric_service.ListMetricDescriptorsRequest], + metric_service.ListMetricDescriptorsResponse, + ]: + r"""Return a callable for the list metric descriptors method over gRPC. + + Lists metric descriptors that match a filter. This + method does not require a Workspace. + + Returns: + Callable[[~.ListMetricDescriptorsRequest], + ~.ListMetricDescriptorsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_metric_descriptors" not in self._stubs: + self._stubs["list_metric_descriptors"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.MetricService/ListMetricDescriptors", + request_serializer=metric_service.ListMetricDescriptorsRequest.serialize, + response_deserializer=metric_service.ListMetricDescriptorsResponse.deserialize, + ) + return self._stubs["list_metric_descriptors"] + + @property + def get_metric_descriptor( + self, + ) -> Callable[ + [metric_service.GetMetricDescriptorRequest], ga_metric.MetricDescriptor + ]: + r"""Return a callable for the get metric descriptor method over gRPC. 
+ + Gets a single metric descriptor. This method does not + require a Workspace. + + Returns: + Callable[[~.GetMetricDescriptorRequest], + ~.MetricDescriptor]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_metric_descriptor" not in self._stubs: + self._stubs["get_metric_descriptor"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.MetricService/GetMetricDescriptor", + request_serializer=metric_service.GetMetricDescriptorRequest.serialize, + response_deserializer=ga_metric.MetricDescriptor.FromString, + ) + return self._stubs["get_metric_descriptor"] + + @property + def create_metric_descriptor( + self, + ) -> Callable[ + [metric_service.CreateMetricDescriptorRequest], ga_metric.MetricDescriptor + ]: + r"""Return a callable for the create metric descriptor method over gRPC. + + Creates a new metric descriptor. User-created metric descriptors + define `custom + metrics `__. + + Returns: + Callable[[~.CreateMetricDescriptorRequest], + ~.MetricDescriptor]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_metric_descriptor" not in self._stubs: + self._stubs["create_metric_descriptor"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.MetricService/CreateMetricDescriptor", + request_serializer=metric_service.CreateMetricDescriptorRequest.serialize, + response_deserializer=ga_metric.MetricDescriptor.FromString, + ) + return self._stubs["create_metric_descriptor"] + + @property + def delete_metric_descriptor( + self, + ) -> Callable[[metric_service.DeleteMetricDescriptorRequest], empty.Empty]: + r"""Return a callable for the delete metric descriptor method over gRPC. + + Deletes a metric descriptor. Only user-created `custom + metrics `__ + can be deleted. + + Returns: + Callable[[~.DeleteMetricDescriptorRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_metric_descriptor" not in self._stubs: + self._stubs["delete_metric_descriptor"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", + request_serializer=metric_service.DeleteMetricDescriptorRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_metric_descriptor"] + + @property + def list_time_series( + self, + ) -> Callable[ + [metric_service.ListTimeSeriesRequest], metric_service.ListTimeSeriesResponse + ]: + r"""Return a callable for the list time series method over gRPC. + + Lists time series that match a filter. This method + does not require a Workspace. + + Returns: + Callable[[~.ListTimeSeriesRequest], + ~.ListTimeSeriesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_time_series" not in self._stubs: + self._stubs["list_time_series"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.MetricService/ListTimeSeries", + request_serializer=metric_service.ListTimeSeriesRequest.serialize, + response_deserializer=metric_service.ListTimeSeriesResponse.deserialize, + ) + return self._stubs["list_time_series"] + + @property + def create_time_series( + self, + ) -> Callable[[metric_service.CreateTimeSeriesRequest], empty.Empty]: + r"""Return a callable for the create time series method over gRPC. + + Creates or adds data to one or more time series. + The response is empty if all time series in the request + were written. If any time series could not be written, a + corresponding failure message is included in the error + response. + + Returns: + Callable[[~.CreateTimeSeriesRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_time_series" not in self._stubs: + self._stubs["create_time_series"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.MetricService/CreateTimeSeries", + request_serializer=metric_service.CreateTimeSeriesRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["create_time_series"] + + +__all__ = ("MetricServiceGrpcTransport",) diff --git a/google/cloud/monitoring_v3/services/metric_service/transports/grpc_asyncio.py b/google/cloud/monitoring_v3/services/metric_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..b03e4b1e --- /dev/null +++ b/google/cloud/monitoring_v3/services/metric_service/transports/grpc_asyncio.py @@ -0,0 +1,484 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.api import metric_pb2 as ga_metric # type: ignore +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.monitoring_v3.types import metric_service +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import MetricServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import MetricServiceGrpcTransport + + +class MetricServiceGrpcAsyncIOTransport(MetricServiceTransport): + """gRPC AsyncIO backend transport for MetricService. + + Manages metric descriptors, monitored resource descriptors, + and time series data. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. 
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. 
+ return self._grpc_channel + + @property + def list_monitored_resource_descriptors( + self, + ) -> Callable[ + [metric_service.ListMonitoredResourceDescriptorsRequest], + Awaitable[metric_service.ListMonitoredResourceDescriptorsResponse], + ]: + r"""Return a callable for the list monitored resource + descriptors method over gRPC. + + Lists monitored resource descriptors that match a + filter. This method does not require a Workspace. + + Returns: + Callable[[~.ListMonitoredResourceDescriptorsRequest], + Awaitable[~.ListMonitoredResourceDescriptorsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_monitored_resource_descriptors" not in self._stubs: + self._stubs[ + "list_monitored_resource_descriptors" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", + request_serializer=metric_service.ListMonitoredResourceDescriptorsRequest.serialize, + response_deserializer=metric_service.ListMonitoredResourceDescriptorsResponse.deserialize, + ) + return self._stubs["list_monitored_resource_descriptors"] + + @property + def get_monitored_resource_descriptor( + self, + ) -> Callable[ + [metric_service.GetMonitoredResourceDescriptorRequest], + Awaitable[monitored_resource.MonitoredResourceDescriptor], + ]: + r"""Return a callable for the get monitored resource + descriptor method over gRPC. + + Gets a single monitored resource descriptor. This + method does not require a Workspace. + + Returns: + Callable[[~.GetMonitoredResourceDescriptorRequest], + Awaitable[~.MonitoredResourceDescriptor]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_monitored_resource_descriptor" not in self._stubs: + self._stubs[ + "get_monitored_resource_descriptor" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", + request_serializer=metric_service.GetMonitoredResourceDescriptorRequest.serialize, + response_deserializer=monitored_resource.MonitoredResourceDescriptor.FromString, + ) + return self._stubs["get_monitored_resource_descriptor"] + + @property + def list_metric_descriptors( + self, + ) -> Callable[ + [metric_service.ListMetricDescriptorsRequest], + Awaitable[metric_service.ListMetricDescriptorsResponse], + ]: + r"""Return a callable for the list metric descriptors method over gRPC. + + Lists metric descriptors that match a filter. This + method does not require a Workspace. + + Returns: + Callable[[~.ListMetricDescriptorsRequest], + Awaitable[~.ListMetricDescriptorsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_metric_descriptors" not in self._stubs: + self._stubs["list_metric_descriptors"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.MetricService/ListMetricDescriptors", + request_serializer=metric_service.ListMetricDescriptorsRequest.serialize, + response_deserializer=metric_service.ListMetricDescriptorsResponse.deserialize, + ) + return self._stubs["list_metric_descriptors"] + + @property + def get_metric_descriptor( + self, + ) -> Callable[ + [metric_service.GetMetricDescriptorRequest], + Awaitable[ga_metric.MetricDescriptor], + ]: + r"""Return a callable for the get metric descriptor method over gRPC. + + Gets a single metric descriptor. 
This method does not + require a Workspace. + + Returns: + Callable[[~.GetMetricDescriptorRequest], + Awaitable[~.MetricDescriptor]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_metric_descriptor" not in self._stubs: + self._stubs["get_metric_descriptor"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.MetricService/GetMetricDescriptor", + request_serializer=metric_service.GetMetricDescriptorRequest.serialize, + response_deserializer=ga_metric.MetricDescriptor.FromString, + ) + return self._stubs["get_metric_descriptor"] + + @property + def create_metric_descriptor( + self, + ) -> Callable[ + [metric_service.CreateMetricDescriptorRequest], + Awaitable[ga_metric.MetricDescriptor], + ]: + r"""Return a callable for the create metric descriptor method over gRPC. + + Creates a new metric descriptor. User-created metric descriptors + define `custom + metrics `__. + + Returns: + Callable[[~.CreateMetricDescriptorRequest], + Awaitable[~.MetricDescriptor]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_metric_descriptor" not in self._stubs: + self._stubs["create_metric_descriptor"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.MetricService/CreateMetricDescriptor", + request_serializer=metric_service.CreateMetricDescriptorRequest.serialize, + response_deserializer=ga_metric.MetricDescriptor.FromString, + ) + return self._stubs["create_metric_descriptor"] + + @property + def delete_metric_descriptor( + self, + ) -> Callable[ + [metric_service.DeleteMetricDescriptorRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the delete metric descriptor method over gRPC. + + Deletes a metric descriptor. Only user-created `custom + metrics `__ + can be deleted. + + Returns: + Callable[[~.DeleteMetricDescriptorRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_metric_descriptor" not in self._stubs: + self._stubs["delete_metric_descriptor"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", + request_serializer=metric_service.DeleteMetricDescriptorRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_metric_descriptor"] + + @property + def list_time_series( + self, + ) -> Callable[ + [metric_service.ListTimeSeriesRequest], + Awaitable[metric_service.ListTimeSeriesResponse], + ]: + r"""Return a callable for the list time series method over gRPC. + + Lists time series that match a filter. This method + does not require a Workspace. + + Returns: + Callable[[~.ListTimeSeriesRequest], + Awaitable[~.ListTimeSeriesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_time_series" not in self._stubs: + self._stubs["list_time_series"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.MetricService/ListTimeSeries", + request_serializer=metric_service.ListTimeSeriesRequest.serialize, + response_deserializer=metric_service.ListTimeSeriesResponse.deserialize, + ) + return self._stubs["list_time_series"] + + @property + def create_time_series( + self, + ) -> Callable[[metric_service.CreateTimeSeriesRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the create time series method over gRPC. + + Creates or adds data to one or more time series. + The response is empty if all time series in the request + were written. If any time series could not be written, a + corresponding failure message is included in the error + response. + + Returns: + Callable[[~.CreateTimeSeriesRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_time_series" not in self._stubs: + self._stubs["create_time_series"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.MetricService/CreateTimeSeries", + request_serializer=metric_service.CreateTimeSeriesRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["create_time_series"] + + +__all__ = ("MetricServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/monitoring_v3/services/notification_channel_service/__init__.py b/google/cloud/monitoring_v3/services/notification_channel_service/__init__.py new file mode 100644 index 00000000..a2a50f8b --- /dev/null +++ b/google/cloud/monitoring_v3/services/notification_channel_service/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .client import NotificationChannelServiceClient +from .async_client import NotificationChannelServiceAsyncClient + +__all__ = ( + "NotificationChannelServiceClient", + "NotificationChannelServiceAsyncClient", +) diff --git a/google/cloud/monitoring_v3/services/notification_channel_service/async_client.py b/google/cloud/monitoring_v3/services/notification_channel_service/async_client.py new file mode 100644 index 00000000..ac7d9569 --- /dev/null +++ b/google/cloud/monitoring_v3/services/notification_channel_service/async_client.py @@ -0,0 +1,1111 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api import label_pb2 as label # type: ignore +from google.api import launch_stage_pb2 as launch_stage # type: ignore +from google.cloud.monitoring_v3.services.notification_channel_service import pagers +from google.cloud.monitoring_v3.types import common +from google.cloud.monitoring_v3.types import notification +from google.cloud.monitoring_v3.types import notification_service +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.protobuf import wrappers_pb2 as wrappers # type: ignore + +from .transports.base import NotificationChannelServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import NotificationChannelServiceGrpcAsyncIOTransport +from .client import NotificationChannelServiceClient + + +class NotificationChannelServiceAsyncClient: + """The Notification Channel API provides access to configuration + that controls how messages related to incidents are sent. 
+ """ + + _client: NotificationChannelServiceClient + + DEFAULT_ENDPOINT = NotificationChannelServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = NotificationChannelServiceClient.DEFAULT_MTLS_ENDPOINT + + notification_channel_path = staticmethod( + NotificationChannelServiceClient.notification_channel_path + ) + parse_notification_channel_path = staticmethod( + NotificationChannelServiceClient.parse_notification_channel_path + ) + notification_channel_descriptor_path = staticmethod( + NotificationChannelServiceClient.notification_channel_descriptor_path + ) + parse_notification_channel_descriptor_path = staticmethod( + NotificationChannelServiceClient.parse_notification_channel_descriptor_path + ) + + common_project_path = staticmethod( + NotificationChannelServiceClient.common_project_path + ) + parse_common_project_path = staticmethod( + NotificationChannelServiceClient.parse_common_project_path + ) + + common_organization_path = staticmethod( + NotificationChannelServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + NotificationChannelServiceClient.parse_common_organization_path + ) + + common_folder_path = staticmethod( + NotificationChannelServiceClient.common_folder_path + ) + parse_common_folder_path = staticmethod( + NotificationChannelServiceClient.parse_common_folder_path + ) + + common_billing_account_path = staticmethod( + NotificationChannelServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + NotificationChannelServiceClient.parse_common_billing_account_path + ) + + common_location_path = staticmethod( + NotificationChannelServiceClient.common_location_path + ) + parse_common_location_path = staticmethod( + NotificationChannelServiceClient.parse_common_location_path + ) + + from_service_account_file = ( + NotificationChannelServiceClient.from_service_account_file + ) + from_service_account_json = from_service_account_file + + get_transport_class = 
functools.partial( + type(NotificationChannelServiceClient).get_transport_class, + type(NotificationChannelServiceClient), + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, NotificationChannelServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the notification channel service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.NotificationChannelServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = NotificationChannelServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def list_notification_channel_descriptors( + self, + request: notification_service.ListNotificationChannelDescriptorsRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNotificationChannelDescriptorsAsyncPager: + r"""Lists the descriptors for supported channel types. + The use of descriptors makes it possible for new channel + types to be dynamically added. + + Args: + request (:class:`~.notification_service.ListNotificationChannelDescriptorsRequest`): + The request object. The + `ListNotificationChannelDescriptors` request. + name (:class:`str`): + Required. The REST resource name of the parent from + which to retrieve the notification channel descriptors. + The expected syntax is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + + Note that this names the parent container in which to + look for the descriptors; to retrieve a single + descriptor by name, use the + [GetNotificationChannelDescriptor][google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor] + operation, instead. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListNotificationChannelDescriptorsAsyncPager: + The ``ListNotificationChannelDescriptors`` response. 
+ + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notification_service.ListNotificationChannelDescriptorsRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_notification_channel_descriptors, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListNotificationChannelDescriptorsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_notification_channel_descriptor( + self, + request: notification_service.GetNotificationChannelDescriptorRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notification.NotificationChannelDescriptor: + r"""Gets a single channel descriptor. The descriptor + indicates which fields are expected / permitted for a + notification channel of the given type. + + Args: + request (:class:`~.notification_service.GetNotificationChannelDescriptorRequest`): + The request object. The + `GetNotificationChannelDescriptor` request. + name (:class:`str`): + Required. The channel type for which to execute the + request. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notification.NotificationChannelDescriptor: + A description of a notification + channel. The descriptor includes the + properties of the channel and the set of + labels or fields that must be specified + to configure channels of a given type. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set."
+ ) + + request = notification_service.GetNotificationChannelDescriptorRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_notification_channel_descriptor, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_notification_channels( + self, + request: notification_service.ListNotificationChannelsRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNotificationChannelsAsyncPager: + r"""Lists the notification channels that have been + created for the project. + + Args: + request (:class:`~.notification_service.ListNotificationChannelsRequest`): + The request object. The `ListNotificationChannels` + request. + name (:class:`str`): + Required. The project on which to execute the request. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + + This names the container in which to look for the + notification channels; it does not name a specific + channel. 
To query a specific channel by REST resource + name, use the + [``GetNotificationChannel``][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] + operation. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListNotificationChannelsAsyncPager: + The ``ListNotificationChannels`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notification_service.ListNotificationChannelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_notification_channels, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListNotificationChannelsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_notification_channel( + self, + request: notification_service.GetNotificationChannelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notification.NotificationChannel: + r"""Gets a single notification channel. The channel + includes the relevant configuration details with which + the channel was created. However, the response may + truncate or omit passwords, API keys, or other private + key matter and thus the response may not be 100% + identical to the information that was supplied in the + call to the create method. + + Args: + request (:class:`~.notification_service.GetNotificationChannelRequest`): + The request object. The `GetNotificationChannel` + request. + name (:class:`str`): + Required. The channel for which to execute the request. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notification.NotificationChannel: + A ``NotificationChannel`` is a medium through which an + alert is delivered when a policy violation is detected. + Examples of channels include email, SMS, and third-party + messaging applications. 
Fields containing sensitive + information like authentication tokens or contact info + are only partially populated on retrieval. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notification_service.GetNotificationChannelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_notification_channel, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def create_notification_channel( + self, + request: notification_service.CreateNotificationChannelRequest = None, + *, + name: str = None, + notification_channel: notification.NotificationChannel = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notification.NotificationChannel: + r"""Creates a new notification channel, representing a + single notification endpoint such as an email address, + SMS number, or PagerDuty service. + + Args: + request (:class:`~.notification_service.CreateNotificationChannelRequest`): + The request object. The `CreateNotificationChannel` + request. + name (:class:`str`): + Required. The project on which to execute the request. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + + This names the container into which the channel will be + written, this does not name the newly created channel. + The resulting channel's name will have a normalized + version of this field as a prefix, but will add + ``/notificationChannels/[CHANNEL_ID]`` to identify the + channel. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notification_channel (:class:`~.notification.NotificationChannel`): + Required. The definition of the ``NotificationChannel`` + to create. + This corresponds to the ``notification_channel`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notification.NotificationChannel: + A ``NotificationChannel`` is a medium through which an + alert is delivered when a policy violation is detected. 
+ Examples of channels include email, SMS, and third-party + messaging applications. Fields containing sensitive + information like authentication tokens or contact info + are only partially populated on retrieval. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name, notification_channel]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notification_service.CreateNotificationChannelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if notification_channel is not None: + request.notification_channel = notification_channel + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_notification_channel, + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_notification_channel( + self, + request: notification_service.UpdateNotificationChannelRequest = None, + *, + update_mask: field_mask.FieldMask = None, + notification_channel: notification.NotificationChannel = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notification.NotificationChannel: + r"""Updates a notification channel. 
Fields not specified + in the field mask remain unchanged. + + Args: + request (:class:`~.notification_service.UpdateNotificationChannelRequest`): + The request object. The `UpdateNotificationChannel` + request. + update_mask (:class:`~.field_mask.FieldMask`): + The fields to update. + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notification_channel (:class:`~.notification.NotificationChannel`): + Required. A description of the changes to be applied to + the specified notification channel. The description must + provide a definition for fields to be updated; the names + of these fields should also be included in the + ``update_mask``. + This corresponds to the ``notification_channel`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notification.NotificationChannel: + A ``NotificationChannel`` is a medium through which an + alert is delivered when a policy violation is detected. + Examples of channels include email, SMS, and third-party + messaging applications. Fields containing sensitive + information like authentication tokens or contact info + are only partially populated on retrieval. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([update_mask, notification_channel]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = notification_service.UpdateNotificationChannelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if update_mask is not None: + request.update_mask = update_mask + if notification_channel is not None: + request.notification_channel = notification_channel + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_notification_channel, + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("notification_channel.name", request.notification_channel.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_notification_channel( + self, + request: notification_service.DeleteNotificationChannelRequest = None, + *, + name: str = None, + force: bool = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a notification channel. + + Args: + request (:class:`~.notification_service.DeleteNotificationChannelRequest`): + The request object. The `DeleteNotificationChannel` + request. + name (:class:`str`): + Required. The channel for which to execute the request. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + force (:class:`bool`): + If true, the notification channel + will be deleted regardless of its use in + alert policies (the policies will be + updated to remove the channel). 
If + false, channels that are still + referenced by an existing alerting + policy will fail to be deleted in a + delete operation. + This corresponds to the ``force`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name, force]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notification_service.DeleteNotificationChannelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if force is not None: + request.force = force + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_notification_channel, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def send_notification_channel_verification_code( + self, + request: notification_service.SendNotificationChannelVerificationCodeRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Causes a verification code to be delivered to the channel. The + code can then be supplied in ``VerifyNotificationChannel`` to + verify the channel. + + Args: + request (:class:`~.notification_service.SendNotificationChannelVerificationCodeRequest`): + The request object. The + `SendNotificationChannelVerificationCode` request. + name (:class:`str`): + Required. The notification channel to + which to send a verification code. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notification_service.SendNotificationChannelVerificationCodeRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.send_notification_channel_verification_code, + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def get_notification_channel_verification_code( + self, + request: notification_service.GetNotificationChannelVerificationCodeRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notification_service.GetNotificationChannelVerificationCodeResponse: + r"""Requests a verification code for an already verified + channel that can then be used in a call to + VerifyNotificationChannel() on a different channel with + an equivalent identity in the same or in a different + project. This makes it possible to copy a channel + between projects without requiring manual reverification + of the channel. If the channel is not in the verified + state, this method will fail (in other words, this may + only be used if the + SendNotificationChannelVerificationCode and + VerifyNotificationChannel paths have already been used + to put the given channel into the verified state). 
+ + There is no guarantee that the verification codes + returned by this method will be of a similar structure + or form as the ones that are delivered to the channel + via SendNotificationChannelVerificationCode; while + VerifyNotificationChannel() will recognize both the + codes delivered via + SendNotificationChannelVerificationCode() and returned + from GetNotificationChannelVerificationCode(), it is + typically the case that the verification codes delivered + via + SendNotificationChannelVerificationCode() will be + shorter and also have a shorter expiration (e.g. codes + such as "G-123456") whereas GetVerificationCode() will + typically return a much longer, websafe base 64 encoded + string that has a longer expiration time. + + Args: + request (:class:`~.notification_service.GetNotificationChannelVerificationCodeRequest`): + The request object. The + `GetNotificationChannelVerificationCode` request. + name (:class:`str`): + Required. The notification channel + for which a verification code is to be + generated and retrieved. This must name + a channel that is already verified; if + the specified channel is not verified, + the request will fail. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notification_service.GetNotificationChannelVerificationCodeResponse: + The ``GetNotificationChannelVerificationCode`` request. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notification_service.GetNotificationChannelVerificationCodeRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_notification_channel_verification_code, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def verify_notification_channel( + self, + request: notification_service.VerifyNotificationChannelRequest = None, + *, + name: str = None, + code: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notification.NotificationChannel: + r"""Verifies a ``NotificationChannel`` by proving receipt of the + code delivered to the channel as a result of calling + ``SendNotificationChannelVerificationCode``. + + Args: + request (:class:`~.notification_service.VerifyNotificationChannelRequest`): + The request object. The `VerifyNotificationChannel` + request. + name (:class:`str`): + Required. The notification channel to + verify. 
+ This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + code (:class:`str`): + Required. The verification code that was delivered to + the channel as a result of invoking the + ``SendNotificationChannelVerificationCode`` API method + or that was retrieved from a verified channel via + ``GetNotificationChannelVerificationCode``. For example, + one might have "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" + (in general, one is only guaranteed that the code is + valid UTF-8; one should not make any assumptions + regarding the structure or format of the code). + This corresponds to the ``code`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notification.NotificationChannel: + A ``NotificationChannel`` is a medium through which an + alert is delivered when a policy violation is detected. + Examples of channels include email, SMS, and third-party + messaging applications. Fields containing sensitive + information like authentication tokens or contact info + are only partially populated on retrieval. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name, code]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notification_service.VerifyNotificationChannelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if name is not None: + request.name = name + if code is not None: + request.code = code + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.verify_notification_channel, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("NotificationChannelServiceAsyncClient",) diff --git a/google/cloud/monitoring_v3/services/notification_channel_service/client.py b/google/cloud/monitoring_v3/services/notification_channel_service/client.py new file mode 100644 index 00000000..b8feecbe --- /dev/null +++ b/google/cloud/monitoring_v3/services/notification_channel_service/client.py @@ -0,0 +1,1305 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api import label_pb2 as label # type: ignore +from google.api import launch_stage_pb2 as launch_stage # type: ignore +from google.cloud.monitoring_v3.services.notification_channel_service import pagers +from google.cloud.monitoring_v3.types import common +from google.cloud.monitoring_v3.types import notification +from google.cloud.monitoring_v3.types import notification_service +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.protobuf import wrappers_pb2 as wrappers # type: ignore + +from .transports.base import NotificationChannelServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import NotificationChannelServiceGrpcTransport +from .transports.grpc_asyncio import 
NotificationChannelServiceGrpcAsyncIOTransport + + +class NotificationChannelServiceClientMeta(type): + """Metaclass for the NotificationChannelService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[NotificationChannelServiceTransport]] + _transport_registry["grpc"] = NotificationChannelServiceGrpcTransport + _transport_registry["grpc_asyncio"] = NotificationChannelServiceGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[NotificationChannelServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class NotificationChannelServiceClient(metaclass=NotificationChannelServiceClientMeta): + """The Notification Channel API provides access to configuration + that controls how messages related to incidents are sent. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "monitoring.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + {@api.name}: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @staticmethod + def notification_channel_path(project: str, notification_channel: str,) -> str: + """Return a fully-qualified notification_channel string.""" + return "projects/{project}/notificationChannels/{notification_channel}".format( + project=project, notification_channel=notification_channel, + ) + + @staticmethod + def parse_notification_channel_path(path: str) -> Dict[str, str]: + """Parse a notification_channel path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/notificationChannels/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def notification_channel_descriptor_path( + project: str, channel_descriptor: str, + ) -> str: + """Return a fully-qualified notification_channel_descriptor string.""" + return "projects/{project}/notificationChannelDescriptors/{channel_descriptor}".format( + project=project, 
channel_descriptor=channel_descriptor, + ) + + @staticmethod + def parse_notification_channel_descriptor_path(path: str) -> Dict[str, str]: + """Parse a notification_channel_descriptor path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/notificationChannelDescriptors/(?P<channel_descriptor>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Return a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Return a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P<folder>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Return a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Return a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + 
return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str,) -> str: + """Return a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, NotificationChannelServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the notification channel service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.NotificationChannelServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (client_options_lib.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. 
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, NotificationChannelServiceTransport): + # transport is a NotificationChannelServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def list_notification_channel_descriptors( + self, + request: notification_service.ListNotificationChannelDescriptorsRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNotificationChannelDescriptorsPager: + r"""Lists the descriptors for supported channel types. 
+ The use of descriptors makes it possible for new channel + types to be dynamically added. + + Args: + request (:class:`~.notification_service.ListNotificationChannelDescriptorsRequest`): + The request object. The + `ListNotificationChannelDescriptors` request. + name (:class:`str`): + Required. The REST resource name of the parent from + which to retrieve the notification channel descriptors. + The expected syntax is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + + Note that this names the parent container in which to + look for the descriptors; to retrieve a single + descriptor by name, use the + [GetNotificationChannelDescriptor][google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor] + operation, instead. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListNotificationChannelDescriptorsPager: + The ``ListNotificationChannelDescriptors`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notification_service.ListNotificationChannelDescriptorsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance( + request, notification_service.ListNotificationChannelDescriptorsRequest + ): + request = notification_service.ListNotificationChannelDescriptorsRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_notification_channel_descriptors + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListNotificationChannelDescriptorsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def get_notification_channel_descriptor( + self, + request: notification_service.GetNotificationChannelDescriptorRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notification.NotificationChannelDescriptor: + r"""Gets a single channel descriptor. The descriptor + indicates which fields are expected / permitted for a + notification channel of the given type. + + Args: + request (:class:`~.notification_service.GetNotificationChannelDescriptorRequest`): + The request object. The + `GetNotificationChannelDescriptor` response. + name (:class:`str`): + Required. The channel type for which to execute the + request. 
The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notification.NotificationChannelDescriptor: + A description of a notification + channel. The descriptor includes the + properties of the channel and the set of + labels or fields that must be specified + to configure channels of a given type. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notification_service.GetNotificationChannelDescriptorRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, notification_service.GetNotificationChannelDescriptorRequest + ): + request = notification_service.GetNotificationChannelDescriptorRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.get_notification_channel_descriptor + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_notification_channels( + self, + request: notification_service.ListNotificationChannelsRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNotificationChannelsPager: + r"""Lists the notification channels that have been + created for the project. + + Args: + request (:class:`~.notification_service.ListNotificationChannelsRequest`): + The request object. The `ListNotificationChannels` + request. + name (:class:`str`): + Required. The project on which to execute the request. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + + This names the container in which to look for the + notification channels; it does not name a specific + channel. To query a specific channel by REST resource + name, use the + [``GetNotificationChannel``][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] + operation. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListNotificationChannelsPager: + The ``ListNotificationChannels`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notification_service.ListNotificationChannelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, notification_service.ListNotificationChannelsRequest + ): + request = notification_service.ListNotificationChannelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_notification_channels + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListNotificationChannelsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def get_notification_channel( + self, + request: notification_service.GetNotificationChannelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notification.NotificationChannel: + r"""Gets a single notification channel. The channel + includes the relevant configuration details with which + the channel was created. 
However, the response may + truncate or omit passwords, API keys, or other private + key matter and thus the response may not be 100% + identical to the information that was supplied in the + call to the create method. + + Args: + request (:class:`~.notification_service.GetNotificationChannelRequest`): + The request object. The `GetNotificationChannel` + request. + name (:class:`str`): + Required. The channel for which to execute the request. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notification.NotificationChannel: + A ``NotificationChannel`` is a medium through which an + alert is delivered when a policy violation is detected. + Examples of channels include email, SMS, and third-party + messaging applications. Fields containing sensitive + information like authentication tokens or contact info + are only partially populated on retrieval. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notification_service.GetNotificationChannelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, notification_service.GetNotificationChannelRequest): + request = notification_service.GetNotificationChannelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_notification_channel] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_notification_channel( + self, + request: notification_service.CreateNotificationChannelRequest = None, + *, + name: str = None, + notification_channel: notification.NotificationChannel = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notification.NotificationChannel: + r"""Creates a new notification channel, representing a + single notification endpoint such as an email address, + SMS number, or PagerDuty service. + + Args: + request (:class:`~.notification_service.CreateNotificationChannelRequest`): + The request object. The `CreateNotificationChannel` + request. + name (:class:`str`): + Required. The project on which to execute the request. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + + This names the container into which the channel will be + written, this does not name the newly created channel. + The resulting channel's name will have a normalized + version of this field as a prefix, but will add + ``/notificationChannels/[CHANNEL_ID]`` to identify the + channel. 
+ This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notification_channel (:class:`~.notification.NotificationChannel`): + Required. The definition of the ``NotificationChannel`` + to create. + This corresponds to the ``notification_channel`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notification.NotificationChannel: + A ``NotificationChannel`` is a medium through which an + alert is delivered when a policy violation is detected. + Examples of channels include email, SMS, and third-party + messaging applications. Fields containing sensitive + information like authentication tokens or contact info + are only partially populated on retrieval. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, notification_channel]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notification_service.CreateNotificationChannelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, notification_service.CreateNotificationChannelRequest + ): + request = notification_service.CreateNotificationChannelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if name is not None: + request.name = name + if notification_channel is not None: + request.notification_channel = notification_channel + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.create_notification_channel + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_notification_channel( + self, + request: notification_service.UpdateNotificationChannelRequest = None, + *, + update_mask: field_mask.FieldMask = None, + notification_channel: notification.NotificationChannel = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notification.NotificationChannel: + r"""Updates a notification channel. Fields not specified + in the field mask remain unchanged. + + Args: + request (:class:`~.notification_service.UpdateNotificationChannelRequest`): + The request object. The `UpdateNotificationChannel` + request. + update_mask (:class:`~.field_mask.FieldMask`): + The fields to update. + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notification_channel (:class:`~.notification.NotificationChannel`): + Required. A description of the changes to be applied to + the specified notification channel. The description must + provide a definition for fields to be updated; the names + of these fields should also be included in the + ``update_mask``. + This corresponds to the ``notification_channel`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notification.NotificationChannel: + A ``NotificationChannel`` is a medium through which an + alert is delivered when a policy violation is detected. + Examples of channels include email, SMS, and third-party + messaging applications. Fields containing sensitive + information like authentication tokens or contact info + are only partially populated on retrieval. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([update_mask, notification_channel]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notification_service.UpdateNotificationChannelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, notification_service.UpdateNotificationChannelRequest + ): + request = notification_service.UpdateNotificationChannelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if update_mask is not None: + request.update_mask = update_mask + if notification_channel is not None: + request.notification_channel = notification_channel + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.update_notification_channel + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("notification_channel.name", request.notification_channel.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_notification_channel( + self, + request: notification_service.DeleteNotificationChannelRequest = None, + *, + name: str = None, + force: bool = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a notification channel. + + Args: + request (:class:`~.notification_service.DeleteNotificationChannelRequest`): + The request object. The `DeleteNotificationChannel` + request. + name (:class:`str`): + Required. The channel for which to execute the request. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + force (:class:`bool`): + If true, the notification channel + will be deleted regardless of its use in + alert policies (the policies will be + updated to remove the channel). If + false, channels that are still + referenced by an existing alerting + policy will fail to be deleted in a + delete operation. + This corresponds to the ``force`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name, force]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notification_service.DeleteNotificationChannelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, notification_service.DeleteNotificationChannelRequest + ): + request = notification_service.DeleteNotificationChannelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if force is not None: + request.force = force + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.delete_notification_channel + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def send_notification_channel_verification_code( + self, + request: notification_service.SendNotificationChannelVerificationCodeRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Causes a verification code to be delivered to the channel. The + code can then be supplied in ``VerifyNotificationChannel`` to + verify the channel. + + Args: + request (:class:`~.notification_service.SendNotificationChannelVerificationCodeRequest`): + The request object. The + `SendNotificationChannelVerificationCode` request. + name (:class:`str`): + Required. 
The notification channel to + which to send a verification code. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notification_service.SendNotificationChannelVerificationCodeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, notification_service.SendNotificationChannelVerificationCodeRequest + ): + request = notification_service.SendNotificationChannelVerificationCodeRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.send_notification_channel_verification_code + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def get_notification_channel_verification_code( + self, + request: notification_service.GetNotificationChannelVerificationCodeRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notification_service.GetNotificationChannelVerificationCodeResponse: + r"""Requests a verification code for an already verified + channel that can then be used in a call to + VerifyNotificationChannel() on a different channel with + an equivalent identity in the same or in a different + project. This makes it possible to copy a channel + between projects without requiring manual reverification + of the channel. If the channel is not in the verified + state, this method will fail (in other words, this may + only be used if the + SendNotificationChannelVerificationCode and + VerifyNotificationChannel paths have already been used + to put the given channel into the verified state). + + There is no guarantee that the verification codes + returned by this method will be of a similar structure + or form as the ones that are delivered to the channel + via SendNotificationChannelVerificationCode; while + VerifyNotificationChannel() will recognize both the + codes delivered via + SendNotificationChannelVerificationCode() and returned + from GetNotificationChannelVerificationCode(), it is + typically the case that the verification codes delivered + via + SendNotificationChannelVerificationCode() will be + shorter and also have a shorter expiration (e.g. codes + such as "G-123456") whereas GetVerificationCode() will + typically return a much longer, websafe base 64 encoded + string that has a longer expiration time. + + Args: + request (:class:`~.notification_service.GetNotificationChannelVerificationCodeRequest`): + The request object. The + `GetNotificationChannelVerificationCode` request. 
+ name (:class:`str`): + Required. The notification channel + for which a verification code is to be + generated and retrieved. This must name + a channel that is already verified; if + the specified channel is not verified, + the request will fail. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notification_service.GetNotificationChannelVerificationCodeResponse: + The ``GetNotificationChannelVerificationCode`` request. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notification_service.GetNotificationChannelVerificationCodeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, notification_service.GetNotificationChannelVerificationCodeRequest + ): + request = notification_service.GetNotificationChannelVerificationCodeRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[ + self._transport.get_notification_channel_verification_code + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def verify_notification_channel( + self, + request: notification_service.VerifyNotificationChannelRequest = None, + *, + name: str = None, + code: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notification.NotificationChannel: + r"""Verifies a ``NotificationChannel`` by proving receipt of the + code delivered to the channel as a result of calling + ``SendNotificationChannelVerificationCode``. + + Args: + request (:class:`~.notification_service.VerifyNotificationChannelRequest`): + The request object. The `VerifyNotificationChannel` + request. + name (:class:`str`): + Required. The notification channel to + verify. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + code (:class:`str`): + Required. The verification code that was delivered to + the channel as a result of invoking the + ``SendNotificationChannelVerificationCode`` API method + or that was retrieved from a verified channel via + ``GetNotificationChannelVerificationCode``. For example, + one might have "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" + (in general, one is only guaranteed that the code is + valid UTF-8; one should not make any assumptions + regarding the structure or format of the code). + This corresponds to the ``code`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notification.NotificationChannel: + A ``NotificationChannel`` is a medium through which an + alert is delivered when a policy violation is detected. + Examples of channels include email, SMS, and third-party + messaging applications. Fields containing sensitive + information like authentication tokens or contact info + are only partially populated on retrieval. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, code]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notification_service.VerifyNotificationChannelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, notification_service.VerifyNotificationChannelRequest + ): + request = notification_service.VerifyNotificationChannelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if code is not None: + request.code = code + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.verify_notification_channel + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("NotificationChannelServiceClient",) diff --git a/google/cloud/monitoring_v3/services/notification_channel_service/pagers.py b/google/cloud/monitoring_v3/services/notification_channel_service/pagers.py new file mode 100644 index 00000000..3e7e6387 --- /dev/null +++ b/google/cloud/monitoring_v3/services/notification_channel_service/pagers.py @@ -0,0 +1,294 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.monitoring_v3.types import notification +from google.cloud.monitoring_v3.types import notification_service + + +class ListNotificationChannelDescriptorsPager: + """A pager for iterating through ``list_notification_channel_descriptors`` requests. 
+ + This class thinly wraps an initial + :class:`~.notification_service.ListNotificationChannelDescriptorsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``channel_descriptors`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListNotificationChannelDescriptors`` requests and continue to iterate + through the ``channel_descriptors`` field on the + corresponding responses. + + All the usual :class:`~.notification_service.ListNotificationChannelDescriptorsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., notification_service.ListNotificationChannelDescriptorsResponse + ], + request: notification_service.ListNotificationChannelDescriptorsRequest, + response: notification_service.ListNotificationChannelDescriptorsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.notification_service.ListNotificationChannelDescriptorsRequest`): + The initial request object. + response (:class:`~.notification_service.ListNotificationChannelDescriptorsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = notification_service.ListNotificationChannelDescriptorsRequest( + request + ) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages( + self, + ) -> Iterable[notification_service.ListNotificationChannelDescriptorsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[notification.NotificationChannelDescriptor]: + for page in self.pages: + yield from page.channel_descriptors + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListNotificationChannelDescriptorsAsyncPager: + """A pager for iterating through ``list_notification_channel_descriptors`` requests. + + This class thinly wraps an initial + :class:`~.notification_service.ListNotificationChannelDescriptorsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``channel_descriptors`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListNotificationChannelDescriptors`` requests and continue to iterate + through the ``channel_descriptors`` field on the + corresponding responses. + + All the usual :class:`~.notification_service.ListNotificationChannelDescriptorsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[ + ..., + Awaitable[notification_service.ListNotificationChannelDescriptorsResponse], + ], + request: notification_service.ListNotificationChannelDescriptorsRequest, + response: notification_service.ListNotificationChannelDescriptorsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.notification_service.ListNotificationChannelDescriptorsRequest`): + The initial request object. + response (:class:`~.notification_service.ListNotificationChannelDescriptorsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = notification_service.ListNotificationChannelDescriptorsRequest( + request + ) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[notification_service.ListNotificationChannelDescriptorsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[notification.NotificationChannelDescriptor]: + async def async_generator(): + async for page in self.pages: + for response in page.channel_descriptors: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListNotificationChannelsPager: + """A pager for iterating through ``list_notification_channels`` requests. 
+ + This class thinly wraps an initial + :class:`~.notification_service.ListNotificationChannelsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``notification_channels`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListNotificationChannels`` requests and continue to iterate + through the ``notification_channels`` field on the + corresponding responses. + + All the usual :class:`~.notification_service.ListNotificationChannelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., notification_service.ListNotificationChannelsResponse], + request: notification_service.ListNotificationChannelsRequest, + response: notification_service.ListNotificationChannelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.notification_service.ListNotificationChannelsRequest`): + The initial request object. + response (:class:`~.notification_service.ListNotificationChannelsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = notification_service.ListNotificationChannelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[notification_service.ListNotificationChannelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[notification.NotificationChannel]: + for page in self.pages: + yield from page.notification_channels + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListNotificationChannelsAsyncPager: + """A pager for iterating through ``list_notification_channels`` requests. + + This class thinly wraps an initial + :class:`~.notification_service.ListNotificationChannelsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``notification_channels`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListNotificationChannels`` requests and continue to iterate + through the ``notification_channels`` field on the + corresponding responses. + + All the usual :class:`~.notification_service.ListNotificationChannelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[notification_service.ListNotificationChannelsResponse] + ], + request: notification_service.ListNotificationChannelsRequest, + response: notification_service.ListNotificationChannelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.notification_service.ListNotificationChannelsRequest`): + The initial request object. + response (:class:`~.notification_service.ListNotificationChannelsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = notification_service.ListNotificationChannelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[notification_service.ListNotificationChannelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[notification.NotificationChannel]: + async def async_generator(): + async for page in self.pages: + for response in page.notification_channels: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/monitoring_v3/services/notification_channel_service/transports/__init__.py b/google/cloud/monitoring_v3/services/notification_channel_service/transports/__init__.py new file mode 100644 index 00000000..5c9458a1 --- /dev/null +++ b/google/cloud/monitoring_v3/services/notification_channel_service/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import NotificationChannelServiceTransport +from .grpc import NotificationChannelServiceGrpcTransport +from .grpc_asyncio import NotificationChannelServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[NotificationChannelServiceTransport]] +_transport_registry["grpc"] = NotificationChannelServiceGrpcTransport +_transport_registry["grpc_asyncio"] = NotificationChannelServiceGrpcAsyncIOTransport + + +__all__ = ( + "NotificationChannelServiceTransport", + "NotificationChannelServiceGrpcTransport", + "NotificationChannelServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/monitoring_v3/services/notification_channel_service/transports/base.py b/google/cloud/monitoring_v3/services/notification_channel_service/transports/base.py new file mode 100644 index 00000000..5c688215 --- /dev/null +++ b/google/cloud/monitoring_v3/services/notification_channel_service/transports/base.py @@ -0,0 +1,342 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.monitoring_v3.types import notification +from google.cloud.monitoring_v3.types import notification_service +from google.protobuf import empty_pb2 as empty # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class NotificationChannelServiceTransport(abc.ABC): + """Abstract transport class for NotificationChannelService.""" + + AUTH_SCOPES = ( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ) + + def __init__( + self, + *, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.list_notification_channel_descriptors: gapic_v1.method.wrap_method( + self.list_notification_channel_descriptors, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.get_notification_channel_descriptor: gapic_v1.method.wrap_method( + self.get_notification_channel_descriptor, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.list_notification_channels: gapic_v1.method.wrap_method( + self.list_notification_channels, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.get_notification_channel: gapic_v1.method.wrap_method( + self.get_notification_channel, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.create_notification_channel: gapic_v1.method.wrap_method( + self.create_notification_channel, + default_timeout=30.0, + client_info=client_info, + ), + self.update_notification_channel: gapic_v1.method.wrap_method( + self.update_notification_channel, + default_timeout=30.0, + client_info=client_info, + ), + self.delete_notification_channel: gapic_v1.method.wrap_method( + self.delete_notification_channel, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + 
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.send_notification_channel_verification_code: gapic_v1.method.wrap_method( + self.send_notification_channel_verification_code, + default_timeout=30.0, + client_info=client_info, + ), + self.get_notification_channel_verification_code: gapic_v1.method.wrap_method( + self.get_notification_channel_verification_code, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.verify_notification_channel: gapic_v1.method.wrap_method( + self.verify_notification_channel, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + } + + @property + def list_notification_channel_descriptors( + self, + ) -> typing.Callable[ + [notification_service.ListNotificationChannelDescriptorsRequest], + typing.Union[ + notification_service.ListNotificationChannelDescriptorsResponse, + typing.Awaitable[ + notification_service.ListNotificationChannelDescriptorsResponse + ], + ], + ]: + raise NotImplementedError() + + @property + def get_notification_channel_descriptor( + self, + ) -> typing.Callable[ + [notification_service.GetNotificationChannelDescriptorRequest], + typing.Union[ + notification.NotificationChannelDescriptor, + typing.Awaitable[notification.NotificationChannelDescriptor], + ], + ]: + raise NotImplementedError() + + @property + def list_notification_channels( + self, + ) -> typing.Callable[ + [notification_service.ListNotificationChannelsRequest], + typing.Union[ + notification_service.ListNotificationChannelsResponse, + 
typing.Awaitable[notification_service.ListNotificationChannelsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_notification_channel( + self, + ) -> typing.Callable[ + [notification_service.GetNotificationChannelRequest], + typing.Union[ + notification.NotificationChannel, + typing.Awaitable[notification.NotificationChannel], + ], + ]: + raise NotImplementedError() + + @property + def create_notification_channel( + self, + ) -> typing.Callable[ + [notification_service.CreateNotificationChannelRequest], + typing.Union[ + notification.NotificationChannel, + typing.Awaitable[notification.NotificationChannel], + ], + ]: + raise NotImplementedError() + + @property + def update_notification_channel( + self, + ) -> typing.Callable[ + [notification_service.UpdateNotificationChannelRequest], + typing.Union[ + notification.NotificationChannel, + typing.Awaitable[notification.NotificationChannel], + ], + ]: + raise NotImplementedError() + + @property + def delete_notification_channel( + self, + ) -> typing.Callable[ + [notification_service.DeleteNotificationChannelRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def send_notification_channel_verification_code( + self, + ) -> typing.Callable[ + [notification_service.SendNotificationChannelVerificationCodeRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def get_notification_channel_verification_code( + self, + ) -> typing.Callable[ + [notification_service.GetNotificationChannelVerificationCodeRequest], + typing.Union[ + notification_service.GetNotificationChannelVerificationCodeResponse, + typing.Awaitable[ + notification_service.GetNotificationChannelVerificationCodeResponse + ], + ], + ]: + raise NotImplementedError() + + @property + def verify_notification_channel( + self, + ) -> typing.Callable[ + [notification_service.VerifyNotificationChannelRequest], + 
typing.Union[ + notification.NotificationChannel, + typing.Awaitable[notification.NotificationChannel], + ], + ]: + raise NotImplementedError() + + +__all__ = ("NotificationChannelServiceTransport",) diff --git a/google/cloud/monitoring_v3/services/notification_channel_service/transports/grpc.py b/google/cloud/monitoring_v3/services/notification_channel_service/transports/grpc.py new file mode 100644 index 00000000..8d26d140 --- /dev/null +++ b/google/cloud/monitoring_v3/services/notification_channel_service/transports/grpc.py @@ -0,0 +1,580 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.monitoring_v3.types import notification +from google.cloud.monitoring_v3.types import notification_service +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import NotificationChannelServiceTransport, DEFAULT_CLIENT_INFO + + +class NotificationChannelServiceGrpcTransport(NotificationChannelServiceTransport): + """gRPC backend transport for NotificationChannelService. 
+ + The Notification Channel API provides access to configuration + that controls how messages related to incidents are sent. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+ ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ elif api_mtls_endpoint:
+ warnings.warn(
+ "api_mtls_endpoint and client_cert_source are deprecated",
+ DeprecationWarning,
+ )
+
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials. 
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ )
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ )
+
+ self._stubs = {} # type: Dict[str, Callable]
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "monitoring.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> grpc.Channel:
+ """Create and return a gRPC channel object.
+ Args:
+ address (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ grpc.Channel: A gRPC channel object.
+
+ Raises:
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
+ @property
+ def grpc_channel(self) -> grpc.Channel:
+ """Create the channel designed to connect to this service.
+
+ This property caches on the instance; repeated calls return
+ the same channel.
+ """
+ # Return the channel from cache.
+ return self._grpc_channel
+
+ @property
+ def list_notification_channel_descriptors(
+ self,
+ ) -> Callable[
+ [notification_service.ListNotificationChannelDescriptorsRequest],
+ notification_service.ListNotificationChannelDescriptorsResponse,
+ ]:
+ r"""Return a callable for the list notification channel
+ descriptors method over gRPC.
+
+ Lists the descriptors for supported channel types.
+ The use of descriptors makes it possible for new channel
+ types to be dynamically added.
+
+ Returns:
+ Callable[[~.ListNotificationChannelDescriptorsRequest],
+ ~.ListNotificationChannelDescriptorsResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_notification_channel_descriptors" not in self._stubs: + self._stubs[ + "list_notification_channel_descriptors" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", + request_serializer=notification_service.ListNotificationChannelDescriptorsRequest.serialize, + response_deserializer=notification_service.ListNotificationChannelDescriptorsResponse.deserialize, + ) + return self._stubs["list_notification_channel_descriptors"] + + @property + def get_notification_channel_descriptor( + self, + ) -> Callable[ + [notification_service.GetNotificationChannelDescriptorRequest], + notification.NotificationChannelDescriptor, + ]: + r"""Return a callable for the get notification channel + descriptor method over gRPC. + + Gets a single channel descriptor. The descriptor + indicates which fields are expected / permitted for a + notification channel of the given type. + + Returns: + Callable[[~.GetNotificationChannelDescriptorRequest], + ~.NotificationChannelDescriptor]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_notification_channel_descriptor" not in self._stubs: + self._stubs[ + "get_notification_channel_descriptor" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", + request_serializer=notification_service.GetNotificationChannelDescriptorRequest.serialize, + response_deserializer=notification.NotificationChannelDescriptor.deserialize, + ) + return self._stubs["get_notification_channel_descriptor"] + + @property + def list_notification_channels( + self, + ) -> Callable[ + [notification_service.ListNotificationChannelsRequest], + notification_service.ListNotificationChannelsResponse, + ]: + r"""Return a callable for the list notification channels method over gRPC. + + Lists the notification channels that have been + created for the project. + + Returns: + Callable[[~.ListNotificationChannelsRequest], + ~.ListNotificationChannelsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_notification_channels" not in self._stubs: + self._stubs["list_notification_channels"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", + request_serializer=notification_service.ListNotificationChannelsRequest.serialize, + response_deserializer=notification_service.ListNotificationChannelsResponse.deserialize, + ) + return self._stubs["list_notification_channels"] + + @property + def get_notification_channel( + self, + ) -> Callable[ + [notification_service.GetNotificationChannelRequest], + notification.NotificationChannel, + ]: + r"""Return a callable for the get notification channel method over gRPC. + + Gets a single notification channel. 
The channel + includes the relevant configuration details with which + the channel was created. However, the response may + truncate or omit passwords, API keys, or other private + key matter and thus the response may not be 100% + identical to the information that was supplied in the + call to the create method. + + Returns: + Callable[[~.GetNotificationChannelRequest], + ~.NotificationChannel]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_notification_channel" not in self._stubs: + self._stubs["get_notification_channel"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", + request_serializer=notification_service.GetNotificationChannelRequest.serialize, + response_deserializer=notification.NotificationChannel.deserialize, + ) + return self._stubs["get_notification_channel"] + + @property + def create_notification_channel( + self, + ) -> Callable[ + [notification_service.CreateNotificationChannelRequest], + notification.NotificationChannel, + ]: + r"""Return a callable for the create notification channel method over gRPC. + + Creates a new notification channel, representing a + single notification endpoint such as an email address, + SMS number, or PagerDuty service. + + Returns: + Callable[[~.CreateNotificationChannelRequest], + ~.NotificationChannel]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_notification_channel" not in self._stubs: + self._stubs["create_notification_channel"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", + request_serializer=notification_service.CreateNotificationChannelRequest.serialize, + response_deserializer=notification.NotificationChannel.deserialize, + ) + return self._stubs["create_notification_channel"] + + @property + def update_notification_channel( + self, + ) -> Callable[ + [notification_service.UpdateNotificationChannelRequest], + notification.NotificationChannel, + ]: + r"""Return a callable for the update notification channel method over gRPC. + + Updates a notification channel. Fields not specified + in the field mask remain unchanged. + + Returns: + Callable[[~.UpdateNotificationChannelRequest], + ~.NotificationChannel]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_notification_channel" not in self._stubs: + self._stubs["update_notification_channel"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", + request_serializer=notification_service.UpdateNotificationChannelRequest.serialize, + response_deserializer=notification.NotificationChannel.deserialize, + ) + return self._stubs["update_notification_channel"] + + @property + def delete_notification_channel( + self, + ) -> Callable[[notification_service.DeleteNotificationChannelRequest], empty.Empty]: + r"""Return a callable for the delete notification channel method over gRPC. + + Deletes a notification channel. + + Returns: + Callable[[~.DeleteNotificationChannelRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_notification_channel" not in self._stubs: + self._stubs["delete_notification_channel"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", + request_serializer=notification_service.DeleteNotificationChannelRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_notification_channel"] + + @property + def send_notification_channel_verification_code( + self, + ) -> Callable[ + [notification_service.SendNotificationChannelVerificationCodeRequest], + empty.Empty, + ]: + r"""Return a callable for the send notification channel + verification code method over gRPC. + + Causes a verification code to be delivered to the channel. The + code can then be supplied in ``VerifyNotificationChannel`` to + verify the channel. + + Returns: + Callable[[~.SendNotificationChannelVerificationCodeRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "send_notification_channel_verification_code" not in self._stubs: + self._stubs[ + "send_notification_channel_verification_code" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", + request_serializer=notification_service.SendNotificationChannelVerificationCodeRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["send_notification_channel_verification_code"] + + @property + def get_notification_channel_verification_code( + self, + ) -> Callable[ + [notification_service.GetNotificationChannelVerificationCodeRequest], + notification_service.GetNotificationChannelVerificationCodeResponse, + ]: + r"""Return a callable for the get notification channel + verification code method over gRPC. + + Requests a verification code for an already verified + channel that can then be used in a call to + VerifyNotificationChannel() on a different channel with + an equivalent identity in the same or in a different + project. This makes it possible to copy a channel + between projects without requiring manual reverification + of the channel. If the channel is not in the verified + state, this method will fail (in other words, this may + only be used if the + SendNotificationChannelVerificationCode and + VerifyNotificationChannel paths have already been used + to put the given channel into the verified state). 
+ + There is no guarantee that the verification codes + returned by this method will be of a similar structure + or form as the ones that are delivered to the channel + via SendNotificationChannelVerificationCode; while + VerifyNotificationChannel() will recognize both the + codes delivered via + SendNotificationChannelVerificationCode() and returned + from GetNotificationChannelVerificationCode(), it is + typically the case that the verification codes delivered + via + SendNotificationChannelVerificationCode() will be + shorter and also have a shorter expiration (e.g. codes + such as "G-123456") whereas GetVerificationCode() will + typically return a much longer, websafe base 64 encoded + string that has a longer expiration time. + + Returns: + Callable[[~.GetNotificationChannelVerificationCodeRequest], + ~.GetNotificationChannelVerificationCodeResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_notification_channel_verification_code" not in self._stubs: + self._stubs[ + "get_notification_channel_verification_code" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", + request_serializer=notification_service.GetNotificationChannelVerificationCodeRequest.serialize, + response_deserializer=notification_service.GetNotificationChannelVerificationCodeResponse.deserialize, + ) + return self._stubs["get_notification_channel_verification_code"] + + @property + def verify_notification_channel( + self, + ) -> Callable[ + [notification_service.VerifyNotificationChannelRequest], + notification.NotificationChannel, + ]: + r"""Return a callable for the verify notification channel method over gRPC. 
+ + Verifies a ``NotificationChannel`` by proving receipt of the + code delivered to the channel as a result of calling + ``SendNotificationChannelVerificationCode``. + + Returns: + Callable[[~.VerifyNotificationChannelRequest], + ~.NotificationChannel]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "verify_notification_channel" not in self._stubs: + self._stubs["verify_notification_channel"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", + request_serializer=notification_service.VerifyNotificationChannelRequest.serialize, + response_deserializer=notification.NotificationChannel.deserialize, + ) + return self._stubs["verify_notification_channel"] + + +__all__ = ("NotificationChannelServiceGrpcTransport",) diff --git a/google/cloud/monitoring_v3/services/notification_channel_service/transports/grpc_asyncio.py b/google/cloud/monitoring_v3/services/notification_channel_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..7216be8e --- /dev/null +++ b/google/cloud/monitoring_v3/services/notification_channel_service/transports/grpc_asyncio.py @@ -0,0 +1,584 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.monitoring_v3.types import notification +from google.cloud.monitoring_v3.types import notification_service +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import NotificationChannelServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import NotificationChannelServiceGrpcTransport + + +class NotificationChannelServiceGrpcAsyncIOTransport( + NotificationChannelServiceTransport +): + """gRPC AsyncIO backend transport for NotificationChannelService. + + The Notification Channel API provides access to configuration + that controls how messages related to incidents are sent. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. 
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+          google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+              creation failed for any reason.
+          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+              and ``credentials_file`` are passed.
+        """
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. 
+ return self._grpc_channel + + @property + def list_notification_channel_descriptors( + self, + ) -> Callable[ + [notification_service.ListNotificationChannelDescriptorsRequest], + Awaitable[notification_service.ListNotificationChannelDescriptorsResponse], + ]: + r"""Return a callable for the list notification channel + descriptors method over gRPC. + + Lists the descriptors for supported channel types. + The use of descriptors makes it possible for new channel + types to be dynamically added. + + Returns: + Callable[[~.ListNotificationChannelDescriptorsRequest], + Awaitable[~.ListNotificationChannelDescriptorsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_notification_channel_descriptors" not in self._stubs: + self._stubs[ + "list_notification_channel_descriptors" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", + request_serializer=notification_service.ListNotificationChannelDescriptorsRequest.serialize, + response_deserializer=notification_service.ListNotificationChannelDescriptorsResponse.deserialize, + ) + return self._stubs["list_notification_channel_descriptors"] + + @property + def get_notification_channel_descriptor( + self, + ) -> Callable[ + [notification_service.GetNotificationChannelDescriptorRequest], + Awaitable[notification.NotificationChannelDescriptor], + ]: + r"""Return a callable for the get notification channel + descriptor method over gRPC. + + Gets a single channel descriptor. The descriptor + indicates which fields are expected / permitted for a + notification channel of the given type. 
+ + Returns: + Callable[[~.GetNotificationChannelDescriptorRequest], + Awaitable[~.NotificationChannelDescriptor]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_notification_channel_descriptor" not in self._stubs: + self._stubs[ + "get_notification_channel_descriptor" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", + request_serializer=notification_service.GetNotificationChannelDescriptorRequest.serialize, + response_deserializer=notification.NotificationChannelDescriptor.deserialize, + ) + return self._stubs["get_notification_channel_descriptor"] + + @property + def list_notification_channels( + self, + ) -> Callable[ + [notification_service.ListNotificationChannelsRequest], + Awaitable[notification_service.ListNotificationChannelsResponse], + ]: + r"""Return a callable for the list notification channels method over gRPC. + + Lists the notification channels that have been + created for the project. + + Returns: + Callable[[~.ListNotificationChannelsRequest], + Awaitable[~.ListNotificationChannelsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_notification_channels" not in self._stubs: + self._stubs["list_notification_channels"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", + request_serializer=notification_service.ListNotificationChannelsRequest.serialize, + response_deserializer=notification_service.ListNotificationChannelsResponse.deserialize, + ) + return self._stubs["list_notification_channels"] + + @property + def get_notification_channel( + self, + ) -> Callable[ + [notification_service.GetNotificationChannelRequest], + Awaitable[notification.NotificationChannel], + ]: + r"""Return a callable for the get notification channel method over gRPC. + + Gets a single notification channel. The channel + includes the relevant configuration details with which + the channel was created. However, the response may + truncate or omit passwords, API keys, or other private + key matter and thus the response may not be 100% + identical to the information that was supplied in the + call to the create method. + + Returns: + Callable[[~.GetNotificationChannelRequest], + Awaitable[~.NotificationChannel]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_notification_channel" not in self._stubs: + self._stubs["get_notification_channel"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", + request_serializer=notification_service.GetNotificationChannelRequest.serialize, + response_deserializer=notification.NotificationChannel.deserialize, + ) + return self._stubs["get_notification_channel"] + + @property + def create_notification_channel( + self, + ) -> Callable[ + [notification_service.CreateNotificationChannelRequest], + Awaitable[notification.NotificationChannel], + ]: + r"""Return a callable for the create notification channel method over gRPC. + + Creates a new notification channel, representing a + single notification endpoint such as an email address, + SMS number, or PagerDuty service. + + Returns: + Callable[[~.CreateNotificationChannelRequest], + Awaitable[~.NotificationChannel]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_notification_channel" not in self._stubs: + self._stubs["create_notification_channel"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", + request_serializer=notification_service.CreateNotificationChannelRequest.serialize, + response_deserializer=notification.NotificationChannel.deserialize, + ) + return self._stubs["create_notification_channel"] + + @property + def update_notification_channel( + self, + ) -> Callable[ + [notification_service.UpdateNotificationChannelRequest], + Awaitable[notification.NotificationChannel], + ]: + r"""Return a callable for the update notification channel method over gRPC. + + Updates a notification channel. Fields not specified + in the field mask remain unchanged. 
+ + Returns: + Callable[[~.UpdateNotificationChannelRequest], + Awaitable[~.NotificationChannel]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_notification_channel" not in self._stubs: + self._stubs["update_notification_channel"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", + request_serializer=notification_service.UpdateNotificationChannelRequest.serialize, + response_deserializer=notification.NotificationChannel.deserialize, + ) + return self._stubs["update_notification_channel"] + + @property + def delete_notification_channel( + self, + ) -> Callable[ + [notification_service.DeleteNotificationChannelRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the delete notification channel method over gRPC. + + Deletes a notification channel. + + Returns: + Callable[[~.DeleteNotificationChannelRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_notification_channel" not in self._stubs: + self._stubs["delete_notification_channel"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", + request_serializer=notification_service.DeleteNotificationChannelRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_notification_channel"] + + @property + def send_notification_channel_verification_code( + self, + ) -> Callable[ + [notification_service.SendNotificationChannelVerificationCodeRequest], + Awaitable[empty.Empty], + ]: + r"""Return a callable for the send notification channel + verification code method over gRPC. + + Causes a verification code to be delivered to the channel. The + code can then be supplied in ``VerifyNotificationChannel`` to + verify the channel. + + Returns: + Callable[[~.SendNotificationChannelVerificationCodeRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "send_notification_channel_verification_code" not in self._stubs: + self._stubs[ + "send_notification_channel_verification_code" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", + request_serializer=notification_service.SendNotificationChannelVerificationCodeRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["send_notification_channel_verification_code"] + + @property + def get_notification_channel_verification_code( + self, + ) -> Callable[ + [notification_service.GetNotificationChannelVerificationCodeRequest], + Awaitable[notification_service.GetNotificationChannelVerificationCodeResponse], + ]: + r"""Return a callable for the get notification channel + verification code method over gRPC. + + Requests a verification code for an already verified + channel that can then be used in a call to + VerifyNotificationChannel() on a different channel with + an equivalent identity in the same or in a different + project. This makes it possible to copy a channel + between projects without requiring manual reverification + of the channel. If the channel is not in the verified + state, this method will fail (in other words, this may + only be used if the + SendNotificationChannelVerificationCode and + VerifyNotificationChannel paths have already been used + to put the given channel into the verified state). 
+ + There is no guarantee that the verification codes + returned by this method will be of a similar structure + or form as the ones that are delivered to the channel + via SendNotificationChannelVerificationCode; while + VerifyNotificationChannel() will recognize both the + codes delivered via + SendNotificationChannelVerificationCode() and returned + from GetNotificationChannelVerificationCode(), it is + typically the case that the verification codes delivered + via + SendNotificationChannelVerificationCode() will be + shorter and also have a shorter expiration (e.g. codes + such as "G-123456") whereas GetVerificationCode() will + typically return a much longer, websafe base 64 encoded + string that has a longer expiration time. + + Returns: + Callable[[~.GetNotificationChannelVerificationCodeRequest], + Awaitable[~.GetNotificationChannelVerificationCodeResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_notification_channel_verification_code" not in self._stubs: + self._stubs[ + "get_notification_channel_verification_code" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", + request_serializer=notification_service.GetNotificationChannelVerificationCodeRequest.serialize, + response_deserializer=notification_service.GetNotificationChannelVerificationCodeResponse.deserialize, + ) + return self._stubs["get_notification_channel_verification_code"] + + @property + def verify_notification_channel( + self, + ) -> Callable[ + [notification_service.VerifyNotificationChannelRequest], + Awaitable[notification.NotificationChannel], + ]: + r"""Return a callable for the verify notification channel method over gRPC. 
+ + Verifies a ``NotificationChannel`` by proving receipt of the + code delivered to the channel as a result of calling + ``SendNotificationChannelVerificationCode``. + + Returns: + Callable[[~.VerifyNotificationChannelRequest], + Awaitable[~.NotificationChannel]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "verify_notification_channel" not in self._stubs: + self._stubs["verify_notification_channel"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", + request_serializer=notification_service.VerifyNotificationChannelRequest.serialize, + response_deserializer=notification.NotificationChannel.deserialize, + ) + return self._stubs["verify_notification_channel"] + + +__all__ = ("NotificationChannelServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/monitoring_v3/services/service_monitoring_service/__init__.py b/google/cloud/monitoring_v3/services/service_monitoring_service/__init__.py new file mode 100644 index 00000000..e0de505e --- /dev/null +++ b/google/cloud/monitoring_v3/services/service_monitoring_service/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .client import ServiceMonitoringServiceClient +from .async_client import ServiceMonitoringServiceAsyncClient + +__all__ = ( + "ServiceMonitoringServiceClient", + "ServiceMonitoringServiceAsyncClient", +) diff --git a/google/cloud/monitoring_v3/services/service_monitoring_service/async_client.py b/google/cloud/monitoring_v3/services/service_monitoring_service/async_client.py new file mode 100644 index 00000000..c3a53f7a --- /dev/null +++ b/google/cloud/monitoring_v3/services/service_monitoring_service/async_client.py @@ -0,0 +1,1031 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.monitoring_v3.services.service_monitoring_service import pagers +from google.cloud.monitoring_v3.types import service +from google.cloud.monitoring_v3.types import service as gm_service +from google.cloud.monitoring_v3.types import service_service +from google.protobuf import duration_pb2 as duration # type: ignore +from google.type import calendar_period_pb2 as calendar_period # type: ignore + +from .transports.base import ServiceMonitoringServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import ServiceMonitoringServiceGrpcAsyncIOTransport +from .client import ServiceMonitoringServiceClient + + +class ServiceMonitoringServiceAsyncClient: + """The Cloud Monitoring Service-Oriented Monitoring API has endpoints + for managing and querying aspects of a workspace's services. These + include the ``Service``'s monitored resources, its Service-Level + Objectives, and a taxonomy of categorized Health Metrics. 
+ """ + + _client: ServiceMonitoringServiceClient + + DEFAULT_ENDPOINT = ServiceMonitoringServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ServiceMonitoringServiceClient.DEFAULT_MTLS_ENDPOINT + + service_path = staticmethod(ServiceMonitoringServiceClient.service_path) + parse_service_path = staticmethod(ServiceMonitoringServiceClient.parse_service_path) + service_level_objective_path = staticmethod( + ServiceMonitoringServiceClient.service_level_objective_path + ) + parse_service_level_objective_path = staticmethod( + ServiceMonitoringServiceClient.parse_service_level_objective_path + ) + + common_project_path = staticmethod( + ServiceMonitoringServiceClient.common_project_path + ) + parse_common_project_path = staticmethod( + ServiceMonitoringServiceClient.parse_common_project_path + ) + + common_organization_path = staticmethod( + ServiceMonitoringServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + ServiceMonitoringServiceClient.parse_common_organization_path + ) + + common_folder_path = staticmethod(ServiceMonitoringServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + ServiceMonitoringServiceClient.parse_common_folder_path + ) + + common_billing_account_path = staticmethod( + ServiceMonitoringServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + ServiceMonitoringServiceClient.parse_common_billing_account_path + ) + + common_location_path = staticmethod( + ServiceMonitoringServiceClient.common_location_path + ) + parse_common_location_path = staticmethod( + ServiceMonitoringServiceClient.parse_common_location_path + ) + + from_service_account_file = ServiceMonitoringServiceClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(ServiceMonitoringServiceClient).get_transport_class, + type(ServiceMonitoringServiceClient), + ) + + def __init__( + self, + *, + 
credentials: credentials.Credentials = None, + transport: Union[str, ServiceMonitoringServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the service monitoring service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ServiceMonitoringServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + + self._client = ServiceMonitoringServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_service( + self, + request: service_service.CreateServiceRequest = None, + *, + parent: str = None, + service: gm_service.Service = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gm_service.Service: + r"""Create a ``Service``. + + Args: + request (:class:`~.service_service.CreateServiceRequest`): + The request object. The `CreateService` request. + parent (:class:`str`): + Required. Resource name of the parent workspace. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + service (:class:`~.gm_service.Service`): + Required. The ``Service`` to create. + This corresponds to the ``service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gm_service.Service: + A ``Service`` is a discrete, autonomous, and + network-accessible unit, designed to solve an individual + concern + (`Wikipedia `__). + In Cloud Monitoring, a ``Service`` acts as the root + resource under which operational aspects of the service + are accessible. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any([parent, service]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service_service.CreateServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if service is not None: + request.service = service + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_service, + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_service( + self, + request: service_service.GetServiceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> service.Service: + r"""Get the named ``Service``. + + Args: + request (:class:`~.service_service.GetServiceRequest`): + The request object. The `GetService` request. + name (:class:`str`): + Required. Resource name of the ``Service``. The format + is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.service.Service: + A ``Service`` is a discrete, autonomous, and + network-accessible unit, designed to solve an individual + concern + (`Wikipedia `__). + In Cloud Monitoring, a ``Service`` acts as the root + resource under which operational aspects of the service + are accessible. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service_service.GetServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_service, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_services( + self, + request: service_service.ListServicesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListServicesAsyncPager: + r"""List ``Service``\ s for this workspace. 
+ + Args: + request (:class:`~.service_service.ListServicesRequest`): + The request object. The `ListServices` request. + parent (:class:`str`): + Required. Resource name of the parent containing the + listed services, either a project or a Monitoring + Workspace. The formats are: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + workspaces/[HOST_PROJECT_ID_OR_NUMBER] + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListServicesAsyncPager: + The ``ListServices`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service_service.ListServicesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_services, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListServicesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_service( + self, + request: service_service.UpdateServiceRequest = None, + *, + service: gm_service.Service = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gm_service.Service: + r"""Update this ``Service``. + + Args: + request (:class:`~.service_service.UpdateServiceRequest`): + The request object. The `UpdateService` request. + service (:class:`~.gm_service.Service`): + Required. The ``Service`` to draw updates from. The + given ``name`` specifies the resource to update. + This corresponds to the ``service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.gm_service.Service: + A ``Service`` is a discrete, autonomous, and + network-accessible unit, designed to solve an individual + concern + (`Wikipedia `__). + In Cloud Monitoring, a ``Service`` acts as the root + resource under which operational aspects of the service + are accessible. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([service]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service_service.UpdateServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if service is not None: + request.service = service + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_service, + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("service.name", request.service.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_service( + self, + request: service_service.DeleteServiceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Soft delete this ``Service``. + + Args: + request (:class:`~.service_service.DeleteServiceRequest`): + The request object. The `DeleteService` request. + name (:class:`str`): + Required. Resource name of the ``Service`` to delete. 
+ The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service_service.DeleteServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_service, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def create_service_level_objective( + self, + request: service_service.CreateServiceLevelObjectiveRequest = None, + *, + parent: str = None, + service_level_objective: service.ServiceLevelObjective = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> service.ServiceLevelObjective: + r"""Create a ``ServiceLevelObjective`` for the given ``Service``. + + Args: + request (:class:`~.service_service.CreateServiceLevelObjectiveRequest`): + The request object. The `CreateServiceLevelObjective` + request. + parent (:class:`str`): + Required. Resource name of the parent ``Service``. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + service_level_objective (:class:`~.service.ServiceLevelObjective`): + Required. The ``ServiceLevelObjective`` to create. The + provided ``name`` will be respected if no + ``ServiceLevelObjective`` exists with this name. + This corresponds to the ``service_level_objective`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ServiceLevelObjective: + A Service-Level Objective (SLO) + describes a level of desired good + service. It consists of a service-level + indicator (SLI), a performance goal, and + a period over which the objective is to + be evaluated against that goal. The SLO + can use SLIs defined in a number of + different manners. 
Typical SLOs might + include "99% of requests in each rolling + week have latency below 200 + milliseconds" or "99.5% of requests in + each calendar month return + successfully." + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent, service_level_objective]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service_service.CreateServiceLevelObjectiveRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if service_level_objective is not None: + request.service_level_objective = service_level_objective + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_service_level_objective, + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_service_level_objective( + self, + request: service_service.GetServiceLevelObjectiveRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> service.ServiceLevelObjective: + r"""Get a ``ServiceLevelObjective`` by name. + + Args: + request (:class:`~.service_service.GetServiceLevelObjectiveRequest`): + The request object. The `GetServiceLevelObjective` + request. 
+ name (:class:`str`): + Required. Resource name of the ``ServiceLevelObjective`` + to get. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ServiceLevelObjective: + A Service-Level Objective (SLO) + describes a level of desired good + service. It consists of a service-level + indicator (SLI), a performance goal, and + a period over which the objective is to + be evaluated against that goal. The SLO + can use SLIs defined in a number of + different manners. Typical SLOs might + include "99% of requests in each rolling + week have latency below 200 + milliseconds" or "99.5% of requests in + each calendar month return + successfully." + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service_service.GetServiceLevelObjectiveRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_service_level_objective, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_service_level_objectives( + self, + request: service_service.ListServiceLevelObjectivesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListServiceLevelObjectivesAsyncPager: + r"""List the ``ServiceLevelObjective``\ s for the given ``Service``. + + Args: + request (:class:`~.service_service.ListServiceLevelObjectivesRequest`): + The request object. The `ListServiceLevelObjectives` + request. + parent (:class:`str`): + Required. Resource name of the parent containing the + listed SLOs, either a project or a Monitoring Workspace. + The formats are: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/- + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.pagers.ListServiceLevelObjectivesAsyncPager: + The ``ListServiceLevelObjectives`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service_service.ListServiceLevelObjectivesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_service_level_objectives, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListServiceLevelObjectivesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_service_level_objective( + self, + request: service_service.UpdateServiceLevelObjectiveRequest = None, + *, + service_level_objective: service.ServiceLevelObjective = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> service.ServiceLevelObjective: + r"""Update the given ``ServiceLevelObjective``. + + Args: + request (:class:`~.service_service.UpdateServiceLevelObjectiveRequest`): + The request object. The `UpdateServiceLevelObjective` + request. + service_level_objective (:class:`~.service.ServiceLevelObjective`): + Required. The ``ServiceLevelObjective`` to draw updates + from. The given ``name`` specifies the resource to + update. + This corresponds to the ``service_level_objective`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ServiceLevelObjective: + A Service-Level Objective (SLO) + describes a level of desired good + service. It consists of a service-level + indicator (SLI), a performance goal, and + a period over which the objective is to + be evaluated against that goal. The SLO + can use SLIs defined in a number of + different manners. Typical SLOs might + include "99% of requests in each rolling + week have latency below 200 + milliseconds" or "99.5% of requests in + each calendar month return + successfully." + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ if request is not None and any([service_level_objective]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service_service.UpdateServiceLevelObjectiveRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if service_level_objective is not None: + request.service_level_objective = service_level_objective + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_service_level_objective, + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + ( + ( + "service_level_objective.name", + request.service_level_objective.name, + ), + ) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_service_level_objective( + self, + request: service_service.DeleteServiceLevelObjectiveRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Delete the given ``ServiceLevelObjective``. + + Args: + request (:class:`~.service_service.DeleteServiceLevelObjectiveRequest`): + The request object. The `DeleteServiceLevelObjective` + request. + name (:class:`str`): + Required. Resource name of the ``ServiceLevelObjective`` + to delete. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service_service.DeleteServiceLevelObjectiveRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_service_level_objective, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("ServiceMonitoringServiceAsyncClient",) diff --git a/google/cloud/monitoring_v3/services/service_monitoring_service/client.py b/google/cloud/monitoring_v3/services/service_monitoring_service/client.py new file mode 100644 index 00000000..cfbe98d1 --- /dev/null +++ b/google/cloud/monitoring_v3/services/service_monitoring_service/client.py @@ -0,0 +1,1212 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.monitoring_v3.services.service_monitoring_service import pagers +from google.cloud.monitoring_v3.types import service +from google.cloud.monitoring_v3.types import service as gm_service +from google.cloud.monitoring_v3.types import service_service +from google.protobuf import duration_pb2 as duration # type: ignore +from google.type import calendar_period_pb2 as calendar_period # type: ignore + +from .transports.base import ServiceMonitoringServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import ServiceMonitoringServiceGrpcTransport +from .transports.grpc_asyncio import ServiceMonitoringServiceGrpcAsyncIOTransport + + +class ServiceMonitoringServiceClientMeta(type): + """Metaclass for the ServiceMonitoringService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[ServiceMonitoringServiceTransport]] + _transport_registry["grpc"] = ServiceMonitoringServiceGrpcTransport + _transport_registry["grpc_asyncio"] = ServiceMonitoringServiceGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[ServiceMonitoringServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class ServiceMonitoringServiceClient(metaclass=ServiceMonitoringServiceClientMeta): + """The Cloud Monitoring Service-Oriented Monitoring API has endpoints + for managing and querying aspects of a workspace's services. These + include the ``Service``'s monitored resources, its Service-Level + Objectives, and a taxonomy of categorized Health Metrics. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "monitoring.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + {@api.name}: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @staticmethod + def service_path(project: str, service: str,) -> str: + """Return a fully-qualified service string.""" + return "projects/{project}/services/{service}".format( + project=project, service=service, + ) + + @staticmethod + def parse_service_path(path: str) -> Dict[str, str]: + """Parse a service path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/services/(?P<service>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def service_level_objective_path( + project: str, service: str, service_level_objective: str, + ) -> str: + """Return a fully-qualified service_level_objective string.""" + return "projects/{project}/services/{service}/serviceLevelObjectives/{service_level_objective}".format( + project=project, + service=service, + service_level_objective=service_level_objective, + ) + + @staticmethod + def 
parse_service_level_objective_path(path: str) -> Dict[str, str]: + """Parse a service_level_objective path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/services/(?P<service>.+?)/serviceLevelObjectives/(?P<service_level_objective>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Return a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Return a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P<folder>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Return a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Return a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def 
common_location_path(project: str, location: str,) -> str: + """Return a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, ServiceMonitoringServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the service monitoring service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ServiceMonitoringServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (client_options_lib.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. 
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ServiceMonitoringServiceTransport): + # transport is a ServiceMonitoringServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_service( + self, + request: service_service.CreateServiceRequest = None, + *, + parent: str = None, + service: gm_service.Service = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gm_service.Service: + r"""Create a ``Service``. + + Args: + request (:class:`~.service_service.CreateServiceRequest`): + The request object. The `CreateService` request. 
+ parent (:class:`str`): + Required. Resource name of the parent workspace. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + service (:class:`~.gm_service.Service`): + Required. The ``Service`` to create. + This corresponds to the ``service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gm_service.Service: + A ``Service`` is a discrete, autonomous, and + network-accessible unit, designed to solve an individual + concern + (`Wikipedia `__). + In Cloud Monitoring, a ``Service`` acts as the root + resource under which operational aspects of the service + are accessible. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, service]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service_service.CreateServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service_service.CreateServiceRequest): + request = service_service.CreateServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if parent is not None: + request.parent = parent + if service is not None: + request.service = service + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_service] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_service( + self, + request: service_service.GetServiceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> service.Service: + r"""Get the named ``Service``. + + Args: + request (:class:`~.service_service.GetServiceRequest`): + The request object. The `GetService` request. + name (:class:`str`): + Required. Resource name of the ``Service``. The format + is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.Service: + A ``Service`` is a discrete, autonomous, and + network-accessible unit, designed to solve an individual + concern + (`Wikipedia `__). + In Cloud Monitoring, a ``Service`` acts as the root + resource under which operational aspects of the service + are accessible. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service_service.GetServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service_service.GetServiceRequest): + request = service_service.GetServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_service] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_services( + self, + request: service_service.ListServicesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListServicesPager: + r"""List ``Service``\ s for this workspace. + + Args: + request (:class:`~.service_service.ListServicesRequest`): + The request object. The `ListServices` request. + parent (:class:`str`): + Required. Resource name of the parent containing the + listed services, either a project or a Monitoring + Workspace. 
The formats are: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + workspaces/[HOST_PROJECT_ID_OR_NUMBER] + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListServicesPager: + The ``ListServices`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service_service.ListServicesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service_service.ListServicesRequest): + request = service_service.ListServicesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_services] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListServicesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_service( + self, + request: service_service.UpdateServiceRequest = None, + *, + service: gm_service.Service = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gm_service.Service: + r"""Update this ``Service``. + + Args: + request (:class:`~.service_service.UpdateServiceRequest`): + The request object. The `UpdateService` request. + service (:class:`~.gm_service.Service`): + Required. The ``Service`` to draw updates from. The + given ``name`` specifies the resource to update. + This corresponds to the ``service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gm_service.Service: + A ``Service`` is a discrete, autonomous, and + network-accessible unit, designed to solve an individual + concern + (`Wikipedia `__). + In Cloud Monitoring, a ``Service`` acts as the root + resource under which operational aspects of the service + are accessible. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([service]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service_service.UpdateServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service_service.UpdateServiceRequest): + request = service_service.UpdateServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if service is not None: + request.service = service + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_service] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("service.name", request.service.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_service( + self, + request: service_service.DeleteServiceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Soft delete this ``Service``. + + Args: + request (:class:`~.service_service.DeleteServiceRequest`): + The request object. The `DeleteService` request. + name (:class:`str`): + Required. Resource name of the ``Service`` to delete. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service_service.DeleteServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service_service.DeleteServiceRequest): + request = service_service.DeleteServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_service] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def create_service_level_objective( + self, + request: service_service.CreateServiceLevelObjectiveRequest = None, + *, + parent: str = None, + service_level_objective: service.ServiceLevelObjective = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> service.ServiceLevelObjective: + r"""Create a ``ServiceLevelObjective`` for the given ``Service``. + + Args: + request (:class:`~.service_service.CreateServiceLevelObjectiveRequest`): + The request object. The `CreateServiceLevelObjective` + request. + parent (:class:`str`): + Required. Resource name of the parent ``Service``. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + service_level_objective (:class:`~.service.ServiceLevelObjective`): + Required. The ``ServiceLevelObjective`` to create. The + provided ``name`` will be respected if no + ``ServiceLevelObjective`` exists with this name. + This corresponds to the ``service_level_objective`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ServiceLevelObjective: + A Service-Level Objective (SLO) + describes a level of desired good + service. It consists of a service-level + indicator (SLI), a performance goal, and + a period over which the objective is to + be evaluated against that goal. The SLO + can use SLIs defined in a number of + different manners. 
Typical SLOs might + include "99% of requests in each rolling + week have latency below 200 + milliseconds" or "99.5% of requests in + each calendar month return + successfully." + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, service_level_objective]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service_service.CreateServiceLevelObjectiveRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service_service.CreateServiceLevelObjectiveRequest): + request = service_service.CreateServiceLevelObjectiveRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if service_level_objective is not None: + request.service_level_objective = service_level_objective + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.create_service_level_objective + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_service_level_objective( + self, + request: service_service.GetServiceLevelObjectiveRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> service.ServiceLevelObjective: + r"""Get a ``ServiceLevelObjective`` by name. + + Args: + request (:class:`~.service_service.GetServiceLevelObjectiveRequest`): + The request object. The `GetServiceLevelObjective` + request. + name (:class:`str`): + Required. Resource name of the ``ServiceLevelObjective`` + to get. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ServiceLevelObjective: + A Service-Level Objective (SLO) + describes a level of desired good + service. It consists of a service-level + indicator (SLI), a performance goal, and + a period over which the objective is to + be evaluated against that goal. The SLO + can use SLIs defined in a number of + different manners. Typical SLOs might + include "99% of requests in each rolling + week have latency below 200 + milliseconds" or "99.5% of requests in + each calendar month return + successfully." + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service_service.GetServiceLevelObjectiveRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service_service.GetServiceLevelObjectiveRequest): + request = service_service.GetServiceLevelObjectiveRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.get_service_level_objective + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_service_level_objectives( + self, + request: service_service.ListServiceLevelObjectivesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListServiceLevelObjectivesPager: + r"""List the ``ServiceLevelObjective``\ s for the given ``Service``. + + Args: + request (:class:`~.service_service.ListServiceLevelObjectivesRequest`): + The request object. The `ListServiceLevelObjectives` + request. + parent (:class:`str`): + Required. Resource name of the parent containing the + listed SLOs, either a project or a Monitoring Workspace. 
+ The formats are: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/- + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListServiceLevelObjectivesPager: + The ``ListServiceLevelObjectives`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service_service.ListServiceLevelObjectivesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service_service.ListServiceLevelObjectivesRequest): + request = service_service.ListServiceLevelObjectivesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_service_level_objectives + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListServiceLevelObjectivesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_service_level_objective( + self, + request: service_service.UpdateServiceLevelObjectiveRequest = None, + *, + service_level_objective: service.ServiceLevelObjective = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> service.ServiceLevelObjective: + r"""Update the given ``ServiceLevelObjective``. + + Args: + request (:class:`~.service_service.UpdateServiceLevelObjectiveRequest`): + The request object. The `UpdateServiceLevelObjective` + request. + service_level_objective (:class:`~.service.ServiceLevelObjective`): + Required. The ``ServiceLevelObjective`` to draw updates + from. The given ``name`` specifies the resource to + update. + This corresponds to the ``service_level_objective`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ServiceLevelObjective: + A Service-Level Objective (SLO) + describes a level of desired good + service. It consists of a service-level + indicator (SLI), a performance goal, and + a period over which the objective is to + be evaluated against that goal. The SLO + can use SLIs defined in a number of + different manners. 
Typical SLOs might + include "99% of requests in each rolling + week have latency below 200 + milliseconds" or "99.5% of requests in + each calendar month return + successfully." + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([service_level_objective]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service_service.UpdateServiceLevelObjectiveRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service_service.UpdateServiceLevelObjectiveRequest): + request = service_service.UpdateServiceLevelObjectiveRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if service_level_objective is not None: + request.service_level_objective = service_level_objective + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.update_service_level_objective + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + ( + ( + "service_level_objective.name", + request.service_level_objective.name, + ), + ) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def delete_service_level_objective( + self, + request: service_service.DeleteServiceLevelObjectiveRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Delete the given ``ServiceLevelObjective``. + + Args: + request (:class:`~.service_service.DeleteServiceLevelObjectiveRequest`): + The request object. The `DeleteServiceLevelObjective` + request. + name (:class:`str`): + Required. Resource name of the ``ServiceLevelObjective`` + to delete. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service_service.DeleteServiceLevelObjectiveRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service_service.DeleteServiceLevelObjectiveRequest): + request = service_service.DeleteServiceLevelObjectiveRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.delete_service_level_objective + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("ServiceMonitoringServiceClient",) diff --git a/google/cloud/monitoring_v3/services/service_monitoring_service/pagers.py b/google/cloud/monitoring_v3/services/service_monitoring_service/pagers.py new file mode 100644 index 00000000..b31caf95 --- /dev/null +++ b/google/cloud/monitoring_v3/services/service_monitoring_service/pagers.py @@ -0,0 +1,281 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.monitoring_v3.types import service +from google.cloud.monitoring_v3.types import service_service + + +class ListServicesPager: + """A pager for iterating through ``list_services`` requests. + + This class thinly wraps an initial + :class:`~.service_service.ListServicesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``services`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListServices`` requests and continue to iterate + through the ``services`` field on the + corresponding responses. + + All the usual :class:`~.service_service.ListServicesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., service_service.ListServicesResponse], + request: service_service.ListServicesRequest, + response: service_service.ListServicesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service_service.ListServicesRequest`): + The initial request object. + response (:class:`~.service_service.ListServicesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = service_service.ListServicesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[service_service.ListServicesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[service.Service]: + for page in self.pages: + yield from page.services + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListServicesAsyncPager: + """A pager for iterating through ``list_services`` requests. + + This class thinly wraps an initial + :class:`~.service_service.ListServicesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``services`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListServices`` requests and continue to iterate + through the ``services`` field on the + corresponding responses. + + All the usual :class:`~.service_service.ListServicesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[service_service.ListServicesResponse]], + request: service_service.ListServicesRequest, + response: service_service.ListServicesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service_service.ListServicesRequest`): + The initial request object. 
+ response (:class:`~.service_service.ListServicesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service_service.ListServicesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[service_service.ListServicesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[service.Service]: + async def async_generator(): + async for page in self.pages: + for response in page.services: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListServiceLevelObjectivesPager: + """A pager for iterating through ``list_service_level_objectives`` requests. + + This class thinly wraps an initial + :class:`~.service_service.ListServiceLevelObjectivesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``service_level_objectives`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListServiceLevelObjectives`` requests and continue to iterate + through the ``service_level_objectives`` field on the + corresponding responses. + + All the usual :class:`~.service_service.ListServiceLevelObjectivesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., service_service.ListServiceLevelObjectivesResponse], + request: service_service.ListServiceLevelObjectivesRequest, + response: service_service.ListServiceLevelObjectivesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service_service.ListServiceLevelObjectivesRequest`): + The initial request object. + response (:class:`~.service_service.ListServiceLevelObjectivesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service_service.ListServiceLevelObjectivesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[service_service.ListServiceLevelObjectivesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[service.ServiceLevelObjective]: + for page in self.pages: + yield from page.service_level_objectives + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListServiceLevelObjectivesAsyncPager: + """A pager for iterating through ``list_service_level_objectives`` requests. + + This class thinly wraps an initial + :class:`~.service_service.ListServiceLevelObjectivesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``service_level_objectives`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListServiceLevelObjectives`` requests and continue to iterate + through the ``service_level_objectives`` field on the + corresponding responses. + + All the usual :class:`~.service_service.ListServiceLevelObjectivesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[service_service.ListServiceLevelObjectivesResponse] + ], + request: service_service.ListServiceLevelObjectivesRequest, + response: service_service.ListServiceLevelObjectivesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service_service.ListServiceLevelObjectivesRequest`): + The initial request object. + response (:class:`~.service_service.ListServiceLevelObjectivesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = service_service.ListServiceLevelObjectivesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[service_service.ListServiceLevelObjectivesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[service.ServiceLevelObjective]: + async def async_generator(): + async for page in self.pages: + for response in page.service_level_objectives: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/monitoring_v3/services/service_monitoring_service/transports/__init__.py b/google/cloud/monitoring_v3/services/service_monitoring_service/transports/__init__.py new file mode 100644 index 00000000..ae774881 --- /dev/null +++ b/google/cloud/monitoring_v3/services/service_monitoring_service/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import ServiceMonitoringServiceTransport +from .grpc import ServiceMonitoringServiceGrpcTransport +from .grpc_asyncio import ServiceMonitoringServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[ServiceMonitoringServiceTransport]] +_transport_registry["grpc"] = ServiceMonitoringServiceGrpcTransport +_transport_registry["grpc_asyncio"] = ServiceMonitoringServiceGrpcAsyncIOTransport + + +__all__ = ( + "ServiceMonitoringServiceTransport", + "ServiceMonitoringServiceGrpcTransport", + "ServiceMonitoringServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/monitoring_v3/services/service_monitoring_service/transports/base.py b/google/cloud/monitoring_v3/services/service_monitoring_service/transports/base.py new file mode 100644 index 00000000..8d2a8b15 --- /dev/null +++ b/google/cloud/monitoring_v3/services/service_monitoring_service/transports/base.py @@ -0,0 +1,318 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.monitoring_v3.types import service +from google.cloud.monitoring_v3.types import service as gm_service +from google.cloud.monitoring_v3.types import service_service +from google.protobuf import empty_pb2 as empty # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class ServiceMonitoringServiceTransport(abc.ABC): + """Abstract transport class for ServiceMonitoringService.""" + + AUTH_SCOPES = ( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ) + + def __init__( + self, + *, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): A list of scopes.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ """
+ # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+ if ":" not in host:
+ host += ":443"
+ self._host = host
+
+ # If no credentials are provided, then determine the appropriate
+ # defaults.
+ if credentials and credentials_file:
+ raise exceptions.DuplicateCredentialArgs(
+ "'credentials_file' and 'credentials' are mutually exclusive"
+ )
+
+ if credentials_file is not None:
+ credentials, _ = auth.load_credentials_from_file(
+ credentials_file, scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = auth.default(
+ scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ # Save the credentials.
+ self._credentials = credentials
+
+ # Lifted into its own function so it can be stubbed out during tests.
+ self._prep_wrapped_messages(client_info)
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
+ self._wrapped_methods = { + self.create_service: gapic_v1.method.wrap_method( + self.create_service, default_timeout=30.0, client_info=client_info, + ), + self.get_service: gapic_v1.method.wrap_method( + self.get_service, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.list_services: gapic_v1.method.wrap_method( + self.list_services, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.update_service: gapic_v1.method.wrap_method( + self.update_service, default_timeout=30.0, client_info=client_info, + ), + self.delete_service: gapic_v1.method.wrap_method( + self.delete_service, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.create_service_level_objective: gapic_v1.method.wrap_method( + self.create_service_level_objective, + default_timeout=30.0, + client_info=client_info, + ), + self.get_service_level_objective: gapic_v1.method.wrap_method( + self.get_service_level_objective, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.list_service_level_objectives: gapic_v1.method.wrap_method( + self.list_service_level_objectives, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + 
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.update_service_level_objective: gapic_v1.method.wrap_method( + self.update_service_level_objective, + default_timeout=30.0, + client_info=client_info, + ), + self.delete_service_level_objective: gapic_v1.method.wrap_method( + self.delete_service_level_objective, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + } + + @property + def create_service( + self, + ) -> typing.Callable[ + [service_service.CreateServiceRequest], + typing.Union[gm_service.Service, typing.Awaitable[gm_service.Service]], + ]: + raise NotImplementedError() + + @property + def get_service( + self, + ) -> typing.Callable[ + [service_service.GetServiceRequest], + typing.Union[service.Service, typing.Awaitable[service.Service]], + ]: + raise NotImplementedError() + + @property + def list_services( + self, + ) -> typing.Callable[ + [service_service.ListServicesRequest], + typing.Union[ + service_service.ListServicesResponse, + typing.Awaitable[service_service.ListServicesResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_service( + self, + ) -> typing.Callable[ + [service_service.UpdateServiceRequest], + typing.Union[gm_service.Service, typing.Awaitable[gm_service.Service]], + ]: + raise NotImplementedError() + + @property + def delete_service( + self, + ) -> typing.Callable[ + [service_service.DeleteServiceRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def create_service_level_objective( + self, + ) -> typing.Callable[ + [service_service.CreateServiceLevelObjectiveRequest], + typing.Union[ + service.ServiceLevelObjective, + typing.Awaitable[service.ServiceLevelObjective], 
+ ], + ]: + raise NotImplementedError() + + @property + def get_service_level_objective( + self, + ) -> typing.Callable[ + [service_service.GetServiceLevelObjectiveRequest], + typing.Union[ + service.ServiceLevelObjective, + typing.Awaitable[service.ServiceLevelObjective], + ], + ]: + raise NotImplementedError() + + @property + def list_service_level_objectives( + self, + ) -> typing.Callable[ + [service_service.ListServiceLevelObjectivesRequest], + typing.Union[ + service_service.ListServiceLevelObjectivesResponse, + typing.Awaitable[service_service.ListServiceLevelObjectivesResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_service_level_objective( + self, + ) -> typing.Callable[ + [service_service.UpdateServiceLevelObjectiveRequest], + typing.Union[ + service.ServiceLevelObjective, + typing.Awaitable[service.ServiceLevelObjective], + ], + ]: + raise NotImplementedError() + + @property + def delete_service_level_objective( + self, + ) -> typing.Callable[ + [service_service.DeleteServiceLevelObjectiveRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + +__all__ = ("ServiceMonitoringServiceTransport",) diff --git a/google/cloud/monitoring_v3/services/service_monitoring_service/transports/grpc.py b/google/cloud/monitoring_v3/services/service_monitoring_service/transports/grpc.py new file mode 100644 index 00000000..a6e4b552 --- /dev/null +++ b/google/cloud/monitoring_v3/services/service_monitoring_service/transports/grpc.py @@ -0,0 +1,520 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.monitoring_v3.types import service +from google.cloud.monitoring_v3.types import service as gm_service +from google.cloud.monitoring_v3.types import service_service +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import ServiceMonitoringServiceTransport, DEFAULT_CLIENT_INFO + + +class ServiceMonitoringServiceGrpcTransport(ServiceMonitoringServiceTransport): + """gRPC backend transport for ServiceMonitoringService. + + The Cloud Monitoring Service-Oriented Monitoring API has endpoints + for managing and querying aspects of a workspace's services. These + include the ``Service``'s monitored resources, its Service-Level + Objectives, and a taxonomy of categorized Health Metrics. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. 
The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {}  # type: Dict[str, Callable] + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + @classmethod + def create_channel( + cls, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def create_service( + self, + ) -> Callable[[service_service.CreateServiceRequest], gm_service.Service]: + r"""Return a callable for the create service method over gRPC. + + Create a ``Service``. + + Returns: + Callable[[~.CreateServiceRequest], + ~.Service]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_service" not in self._stubs: + self._stubs["create_service"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/CreateService", + request_serializer=service_service.CreateServiceRequest.serialize, + response_deserializer=gm_service.Service.deserialize, + ) + return self._stubs["create_service"] + + @property + def get_service( + self, + ) -> Callable[[service_service.GetServiceRequest], service.Service]: + r"""Return a callable for the get service method over gRPC. + + Get the named ``Service``. + + Returns: + Callable[[~.GetServiceRequest], + ~.Service]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_service" not in self._stubs: + self._stubs["get_service"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/GetService", + request_serializer=service_service.GetServiceRequest.serialize, + response_deserializer=service.Service.deserialize, + ) + return self._stubs["get_service"] + + @property + def list_services( + self, + ) -> Callable[ + [service_service.ListServicesRequest], service_service.ListServicesResponse + ]: + r"""Return a callable for the list services method over gRPC. + + List ``Service``\ s for this workspace. + + Returns: + Callable[[~.ListServicesRequest], + ~.ListServicesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_services" not in self._stubs: + self._stubs["list_services"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/ListServices", + request_serializer=service_service.ListServicesRequest.serialize, + response_deserializer=service_service.ListServicesResponse.deserialize, + ) + return self._stubs["list_services"] + + @property + def update_service( + self, + ) -> Callable[[service_service.UpdateServiceRequest], gm_service.Service]: + r"""Return a callable for the update service method over gRPC. + + Update this ``Service``. + + Returns: + Callable[[~.UpdateServiceRequest], + ~.Service]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_service" not in self._stubs: + self._stubs["update_service"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/UpdateService", + request_serializer=service_service.UpdateServiceRequest.serialize, + response_deserializer=gm_service.Service.deserialize, + ) + return self._stubs["update_service"] + + @property + def delete_service( + self, + ) -> Callable[[service_service.DeleteServiceRequest], empty.Empty]: + r"""Return a callable for the delete service method over gRPC. + + Soft delete this ``Service``. + + Returns: + Callable[[~.DeleteServiceRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_service" not in self._stubs: + self._stubs["delete_service"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/DeleteService", + request_serializer=service_service.DeleteServiceRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_service"] + + @property + def create_service_level_objective( + self, + ) -> Callable[ + [service_service.CreateServiceLevelObjectiveRequest], + service.ServiceLevelObjective, + ]: + r"""Return a callable for the create service level objective method over gRPC. + + Create a ``ServiceLevelObjective`` for the given ``Service``. + + Returns: + Callable[[~.CreateServiceLevelObjectiveRequest], + ~.ServiceLevelObjective]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_service_level_objective" not in self._stubs: + self._stubs[ + "create_service_level_objective" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective", + request_serializer=service_service.CreateServiceLevelObjectiveRequest.serialize, + response_deserializer=service.ServiceLevelObjective.deserialize, + ) + return self._stubs["create_service_level_objective"] + + @property + def get_service_level_objective( + self, + ) -> Callable[ + [service_service.GetServiceLevelObjectiveRequest], service.ServiceLevelObjective + ]: + r"""Return a callable for the get service level objective method over gRPC. + + Get a ``ServiceLevelObjective`` by name. + + Returns: + Callable[[~.GetServiceLevelObjectiveRequest], + ~.ServiceLevelObjective]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_service_level_objective" not in self._stubs: + self._stubs["get_service_level_objective"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective", + request_serializer=service_service.GetServiceLevelObjectiveRequest.serialize, + response_deserializer=service.ServiceLevelObjective.deserialize, + ) + return self._stubs["get_service_level_objective"] + + @property + def list_service_level_objectives( + self, + ) -> Callable[ + [service_service.ListServiceLevelObjectivesRequest], + service_service.ListServiceLevelObjectivesResponse, + ]: + r"""Return a callable for the list service level objectives method over gRPC. + + List the ``ServiceLevelObjective``\ s for the given ``Service``. 
+ + Returns: + Callable[[~.ListServiceLevelObjectivesRequest], + ~.ListServiceLevelObjectivesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_service_level_objectives" not in self._stubs: + self._stubs[ + "list_service_level_objectives" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives", + request_serializer=service_service.ListServiceLevelObjectivesRequest.serialize, + response_deserializer=service_service.ListServiceLevelObjectivesResponse.deserialize, + ) + return self._stubs["list_service_level_objectives"] + + @property + def update_service_level_objective( + self, + ) -> Callable[ + [service_service.UpdateServiceLevelObjectiveRequest], + service.ServiceLevelObjective, + ]: + r"""Return a callable for the update service level objective method over gRPC. + + Update the given ``ServiceLevelObjective``. + + Returns: + Callable[[~.UpdateServiceLevelObjectiveRequest], + ~.ServiceLevelObjective]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_service_level_objective" not in self._stubs: + self._stubs[ + "update_service_level_objective" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective", + request_serializer=service_service.UpdateServiceLevelObjectiveRequest.serialize, + response_deserializer=service.ServiceLevelObjective.deserialize, + ) + return self._stubs["update_service_level_objective"] + + @property + def delete_service_level_objective( + self, + ) -> Callable[[service_service.DeleteServiceLevelObjectiveRequest], empty.Empty]: + r"""Return a callable for the delete service level objective method over gRPC. + + Delete the given ``ServiceLevelObjective``. + + Returns: + Callable[[~.DeleteServiceLevelObjectiveRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_service_level_objective" not in self._stubs: + self._stubs[ + "delete_service_level_objective" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective", + request_serializer=service_service.DeleteServiceLevelObjectiveRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_service_level_objective"] + + +__all__ = ("ServiceMonitoringServiceGrpcTransport",) diff --git a/google/cloud/monitoring_v3/services/service_monitoring_service/transports/grpc_asyncio.py b/google/cloud/monitoring_v3/services/service_monitoring_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..8614483f --- /dev/null +++ b/google/cloud/monitoring_v3/services/service_monitoring_service/transports/grpc_asyncio.py @@ -0,0 +1,528 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.monitoring_v3.types import service +from google.cloud.monitoring_v3.types import service as gm_service +from google.cloud.monitoring_v3.types import service_service +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import ServiceMonitoringServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import ServiceMonitoringServiceGrpcTransport + + +class ServiceMonitoringServiceGrpcAsyncIOTransport(ServiceMonitoringServiceTransport): + """gRPC AsyncIO backend transport for ServiceMonitoringService. + + The Cloud Monitoring Service-Oriented Monitoring API has endpoints + for managing and querying aspects of a workspace's services. These + include the ``Service``'s monitored resources, its Service-Level + Objectives, and a taxonomy of categorized Health Metrics. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. 
+ Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. 
+ super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def create_service( + self, + ) -> Callable[ + [service_service.CreateServiceRequest], Awaitable[gm_service.Service] + ]: + r"""Return a callable for the create service method over gRPC. + + Create a ``Service``. + + Returns: + Callable[[~.CreateServiceRequest], + Awaitable[~.Service]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_service" not in self._stubs: + self._stubs["create_service"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/CreateService", + request_serializer=service_service.CreateServiceRequest.serialize, + response_deserializer=gm_service.Service.deserialize, + ) + return self._stubs["create_service"] + + @property + def get_service( + self, + ) -> Callable[[service_service.GetServiceRequest], Awaitable[service.Service]]: + r"""Return a callable for the get service method over gRPC. + + Get the named ``Service``. + + Returns: + Callable[[~.GetServiceRequest], + Awaitable[~.Service]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_service" not in self._stubs: + self._stubs["get_service"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/GetService", + request_serializer=service_service.GetServiceRequest.serialize, + response_deserializer=service.Service.deserialize, + ) + return self._stubs["get_service"] + + @property + def list_services( + self, + ) -> Callable[ + [service_service.ListServicesRequest], + Awaitable[service_service.ListServicesResponse], + ]: + r"""Return a callable for the list services method over gRPC. + + List ``Service``\ s for this workspace. + + Returns: + Callable[[~.ListServicesRequest], + Awaitable[~.ListServicesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_services" not in self._stubs: + self._stubs["list_services"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/ListServices", + request_serializer=service_service.ListServicesRequest.serialize, + response_deserializer=service_service.ListServicesResponse.deserialize, + ) + return self._stubs["list_services"] + + @property + def update_service( + self, + ) -> Callable[ + [service_service.UpdateServiceRequest], Awaitable[gm_service.Service] + ]: + r"""Return a callable for the update service method over gRPC. + + Update this ``Service``. + + Returns: + Callable[[~.UpdateServiceRequest], + Awaitable[~.Service]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_service" not in self._stubs: + self._stubs["update_service"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/UpdateService", + request_serializer=service_service.UpdateServiceRequest.serialize, + response_deserializer=gm_service.Service.deserialize, + ) + return self._stubs["update_service"] + + @property + def delete_service( + self, + ) -> Callable[[service_service.DeleteServiceRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the delete service method over gRPC. + + Soft delete this ``Service``. + + Returns: + Callable[[~.DeleteServiceRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_service" not in self._stubs: + self._stubs["delete_service"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/DeleteService", + request_serializer=service_service.DeleteServiceRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_service"] + + @property + def create_service_level_objective( + self, + ) -> Callable[ + [service_service.CreateServiceLevelObjectiveRequest], + Awaitable[service.ServiceLevelObjective], + ]: + r"""Return a callable for the create service level objective method over gRPC. + + Create a ``ServiceLevelObjective`` for the given ``Service``. + + Returns: + Callable[[~.CreateServiceLevelObjectiveRequest], + Awaitable[~.ServiceLevelObjective]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_service_level_objective" not in self._stubs: + self._stubs[ + "create_service_level_objective" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective", + request_serializer=service_service.CreateServiceLevelObjectiveRequest.serialize, + response_deserializer=service.ServiceLevelObjective.deserialize, + ) + return self._stubs["create_service_level_objective"] + + @property + def get_service_level_objective( + self, + ) -> Callable[ + [service_service.GetServiceLevelObjectiveRequest], + Awaitable[service.ServiceLevelObjective], + ]: + r"""Return a callable for the get service level objective method over gRPC. + + Get a ``ServiceLevelObjective`` by name. + + Returns: + Callable[[~.GetServiceLevelObjectiveRequest], + Awaitable[~.ServiceLevelObjective]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_service_level_objective" not in self._stubs: + self._stubs["get_service_level_objective"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective", + request_serializer=service_service.GetServiceLevelObjectiveRequest.serialize, + response_deserializer=service.ServiceLevelObjective.deserialize, + ) + return self._stubs["get_service_level_objective"] + + @property + def list_service_level_objectives( + self, + ) -> Callable[ + [service_service.ListServiceLevelObjectivesRequest], + Awaitable[service_service.ListServiceLevelObjectivesResponse], + ]: + r"""Return a callable for the list service level objectives method over gRPC. + + List the ``ServiceLevelObjective``\ s for the given ``Service``. 
+ + Returns: + Callable[[~.ListServiceLevelObjectivesRequest], + Awaitable[~.ListServiceLevelObjectivesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_service_level_objectives" not in self._stubs: + self._stubs[ + "list_service_level_objectives" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives", + request_serializer=service_service.ListServiceLevelObjectivesRequest.serialize, + response_deserializer=service_service.ListServiceLevelObjectivesResponse.deserialize, + ) + return self._stubs["list_service_level_objectives"] + + @property + def update_service_level_objective( + self, + ) -> Callable[ + [service_service.UpdateServiceLevelObjectiveRequest], + Awaitable[service.ServiceLevelObjective], + ]: + r"""Return a callable for the update service level objective method over gRPC. + + Update the given ``ServiceLevelObjective``. + + Returns: + Callable[[~.UpdateServiceLevelObjectiveRequest], + Awaitable[~.ServiceLevelObjective]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_service_level_objective" not in self._stubs: + self._stubs[ + "update_service_level_objective" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective", + request_serializer=service_service.UpdateServiceLevelObjectiveRequest.serialize, + response_deserializer=service.ServiceLevelObjective.deserialize, + ) + return self._stubs["update_service_level_objective"] + + @property + def delete_service_level_objective( + self, + ) -> Callable[ + [service_service.DeleteServiceLevelObjectiveRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the delete service level objective method over gRPC. + + Delete the given ``ServiceLevelObjective``. + + Returns: + Callable[[~.DeleteServiceLevelObjectiveRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_service_level_objective" not in self._stubs: + self._stubs[ + "delete_service_level_objective" + ] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective", + request_serializer=service_service.DeleteServiceLevelObjectiveRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_service_level_objective"] + + +__all__ = ("ServiceMonitoringServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/monitoring_v3/services/uptime_check_service/__init__.py b/google/cloud/monitoring_v3/services/uptime_check_service/__init__.py new file mode 100644 index 00000000..5fb9abc6 --- /dev/null +++ b/google/cloud/monitoring_v3/services/uptime_check_service/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .client import UptimeCheckServiceClient +from .async_client import UptimeCheckServiceAsyncClient + +__all__ = ( + "UptimeCheckServiceClient", + "UptimeCheckServiceAsyncClient", +) diff --git a/google/cloud/monitoring_v3/services/uptime_check_service/async_client.py b/google/cloud/monitoring_v3/services/uptime_check_service/async_client.py new file mode 100644 index 00000000..7e52209c --- /dev/null +++ b/google/cloud/monitoring_v3/services/uptime_check_service/async_client.py @@ -0,0 +1,654 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.monitoring_v3.services.uptime_check_service import pagers +from google.cloud.monitoring_v3.types import uptime +from google.cloud.monitoring_v3.types import uptime_service +from google.protobuf import duration_pb2 as duration # type: ignore + +from .transports.base import UptimeCheckServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import UptimeCheckServiceGrpcAsyncIOTransport +from .client import UptimeCheckServiceClient + + +class UptimeCheckServiceAsyncClient: + """The UptimeCheckService API is used to manage (list, create, delete, + edit) Uptime check configurations in the Stackdriver Monitoring + product. An Uptime check is a piece of configuration that determines + which resources and services to monitor for availability. These + configurations can also be configured interactively by navigating to + the [Cloud Console] (http://console.cloud.google.com), selecting the + appropriate project, clicking on "Monitoring" on the left-hand side + to navigate to Stackdriver, and then clicking on "Uptime". 
+ """ + + _client: UptimeCheckServiceClient + + DEFAULT_ENDPOINT = UptimeCheckServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = UptimeCheckServiceClient.DEFAULT_MTLS_ENDPOINT + + uptime_check_config_path = staticmethod( + UptimeCheckServiceClient.uptime_check_config_path + ) + parse_uptime_check_config_path = staticmethod( + UptimeCheckServiceClient.parse_uptime_check_config_path + ) + + common_project_path = staticmethod(UptimeCheckServiceClient.common_project_path) + parse_common_project_path = staticmethod( + UptimeCheckServiceClient.parse_common_project_path + ) + + common_organization_path = staticmethod( + UptimeCheckServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + UptimeCheckServiceClient.parse_common_organization_path + ) + + common_folder_path = staticmethod(UptimeCheckServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + UptimeCheckServiceClient.parse_common_folder_path + ) + + common_billing_account_path = staticmethod( + UptimeCheckServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + UptimeCheckServiceClient.parse_common_billing_account_path + ) + + common_location_path = staticmethod(UptimeCheckServiceClient.common_location_path) + parse_common_location_path = staticmethod( + UptimeCheckServiceClient.parse_common_location_path + ) + + from_service_account_file = UptimeCheckServiceClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(UptimeCheckServiceClient).get_transport_class, + type(UptimeCheckServiceClient), + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, UptimeCheckServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the uptime check service client. 
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.UptimeCheckServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + + self._client = UptimeCheckServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def list_uptime_check_configs( + self, + request: uptime_service.ListUptimeCheckConfigsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListUptimeCheckConfigsAsyncPager: + r"""Lists the existing valid Uptime check configurations + for the project (leaving out any invalid + configurations). + + Args: + request (:class:`~.uptime_service.ListUptimeCheckConfigsRequest`): + The request object. The protocol for the + `ListUptimeCheckConfigs` request. + parent (:class:`str`): + Required. The project whose Uptime check configurations + are listed. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListUptimeCheckConfigsAsyncPager: + The protocol for the ``ListUptimeCheckConfigs`` + response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = uptime_service.ListUptimeCheckConfigsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_uptime_check_configs, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListUptimeCheckConfigsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_uptime_check_config( + self, + request: uptime_service.GetUptimeCheckConfigRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> uptime.UptimeCheckConfig: + r"""Gets a single Uptime check configuration. + + Args: + request (:class:`~.uptime_service.GetUptimeCheckConfigRequest`): + The request object. The protocol for the + `GetUptimeCheckConfig` request. + name (:class:`str`): + Required. The Uptime check configuration to retrieve. 
+ The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.uptime.UptimeCheckConfig: + This message configures which + resources and services to monitor for + availability. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = uptime_service.GetUptimeCheckConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_uptime_check_config, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def create_uptime_check_config( + self, + request: uptime_service.CreateUptimeCheckConfigRequest = None, + *, + parent: str = None, + uptime_check_config: uptime.UptimeCheckConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> uptime.UptimeCheckConfig: + r"""Creates a new Uptime check configuration. + + Args: + request (:class:`~.uptime_service.CreateUptimeCheckConfigRequest`): + The request object. The protocol for the + `CreateUptimeCheckConfig` request. + parent (:class:`str`): + Required. The project in which to create the Uptime + check. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + uptime_check_config (:class:`~.uptime.UptimeCheckConfig`): + Required. The new Uptime check + configuration. + This corresponds to the ``uptime_check_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.uptime.UptimeCheckConfig: + This message configures which + resources and services to monitor for + availability. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent, uptime_check_config]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = uptime_service.CreateUptimeCheckConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if uptime_check_config is not None: + request.uptime_check_config = uptime_check_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_uptime_check_config, + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_uptime_check_config( + self, + request: uptime_service.UpdateUptimeCheckConfigRequest = None, + *, + uptime_check_config: uptime.UptimeCheckConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> uptime.UptimeCheckConfig: + r"""Updates an Uptime check configuration. You can either replace + the entire configuration with a new one or replace only certain + fields in the current configuration by specifying the fields to + be updated via ``updateMask``. Returns the updated + configuration. + + Args: + request (:class:`~.uptime_service.UpdateUptimeCheckConfigRequest`): + The request object. The protocol for the + `UpdateUptimeCheckConfig` request. + uptime_check_config (:class:`~.uptime.UptimeCheckConfig`): + Required. If an ``updateMask`` has been specified, this + field gives the values for the set of fields mentioned + in the ``updateMask``. If an ``updateMask`` has not been + given, this Uptime check configuration replaces the + current configuration. 
If a field is mentioned in
+                ``updateMask`` but the corresponding field is omitted in
+                this partial Uptime check configuration, it has the
+                effect of deleting/clearing the field from the
+                configuration on the server.
+
+                The following fields can be updated: ``display_name``,
+                ``http_check``, ``tcp_check``, ``timeout``,
+                ``content_matchers``, and ``selected_regions``.
+                This corresponds to the ``uptime_check_config`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.uptime.UptimeCheckConfig:
+                This message configures which
+                resources and services to monitor for
+                availability.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        if request is not None and any([uptime_check_config]):
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = uptime_service.UpdateUptimeCheckConfigRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if uptime_check_config is not None:
+            request.uptime_check_config = uptime_check_config
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.update_uptime_check_config,
+            default_timeout=30.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("uptime_check_config.name", request.uptime_check_config.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_uptime_check_config( + self, + request: uptime_service.DeleteUptimeCheckConfigRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an Uptime check configuration. Note that this + method will fail if the Uptime check configuration is + referenced by an alert policy or other dependent configs + that would be rendered invalid by the deletion. + + Args: + request (:class:`~.uptime_service.DeleteUptimeCheckConfigRequest`): + The request object. The protocol for the + `DeleteUptimeCheckConfig` request. + name (:class:`str`): + Required. The Uptime check configuration to delete. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = uptime_service.DeleteUptimeCheckConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_uptime_check_config, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def list_uptime_check_ips( + self, + request: uptime_service.ListUptimeCheckIpsRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListUptimeCheckIpsAsyncPager: + r"""Returns the list of IP addresses that checkers run + from + + Args: + request (:class:`~.uptime_service.ListUptimeCheckIpsRequest`): + The request object. The protocol for the + `ListUptimeCheckIps` request. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListUptimeCheckIpsAsyncPager: + The protocol for the ``ListUptimeCheckIps`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ + request = uptime_service.ListUptimeCheckIpsRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_uptime_check_ips, + default_retry=retries.Retry( + initial=0.1, + maximum=30.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListUptimeCheckIpsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("UptimeCheckServiceAsyncClient",) diff --git a/google/cloud/monitoring_v3/services/uptime_check_service/client.py b/google/cloud/monitoring_v3/services/uptime_check_service/client.py new file mode 100644 index 00000000..72a0f3e8 --- /dev/null +++ b/google/cloud/monitoring_v3/services/uptime_check_service/client.py @@ -0,0 +1,829 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.monitoring_v3.services.uptime_check_service import pagers +from google.cloud.monitoring_v3.types import uptime +from google.cloud.monitoring_v3.types import uptime_service +from google.protobuf import duration_pb2 as duration # type: ignore + +from .transports.base import UptimeCheckServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import UptimeCheckServiceGrpcTransport +from .transports.grpc_asyncio import UptimeCheckServiceGrpcAsyncIOTransport + + +class UptimeCheckServiceClientMeta(type): + """Metaclass for the UptimeCheckService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[UptimeCheckServiceTransport]] + _transport_registry["grpc"] = UptimeCheckServiceGrpcTransport + _transport_registry["grpc_asyncio"] = UptimeCheckServiceGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[UptimeCheckServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class UptimeCheckServiceClient(metaclass=UptimeCheckServiceClientMeta): + """The UptimeCheckService API is used to manage (list, create, delete, + edit) Uptime check configurations in the Stackdriver Monitoring + product. An Uptime check is a piece of configuration that determines + which resources and services to monitor for availability. These + configurations can also be configured interactively by navigating to + the [Cloud Console] (http://console.cloud.google.com), selecting the + appropriate project, clicking on "Monitoring" on the left-hand side + to navigate to Stackdriver, and then clicking on "Uptime". + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "monitoring.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            {@api.name}: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @staticmethod
+    def uptime_check_config_path(project: str, uptime_check_config: str,) -> str:
+        """Return a fully-qualified uptime_check_config string."""
+        return "projects/{project}/uptimeCheckConfigs/{uptime_check_config}".format(
+            project=project, uptime_check_config=uptime_check_config,
+        )
+
+    @staticmethod
+    def parse_uptime_check_config_path(path: str) -> Dict[str, str]:
+        """Parse a uptime_check_config path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/uptimeCheckConfigs/(?P<uptime_check_config>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str,) -> str:
+        """Return a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> 
Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Return a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P<folder>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Return a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Return a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str,) -> str: + """Return a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, UptimeCheckServiceTransport, None] = 
None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the uptime check service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.UptimeCheckServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (client_options_lib.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, UptimeCheckServiceTransport): + # transport is a UptimeCheckServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." 
+ ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def list_uptime_check_configs( + self, + request: uptime_service.ListUptimeCheckConfigsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListUptimeCheckConfigsPager: + r"""Lists the existing valid Uptime check configurations + for the project (leaving out any invalid + configurations). + + Args: + request (:class:`~.uptime_service.ListUptimeCheckConfigsRequest`): + The request object. The protocol for the + `ListUptimeCheckConfigs` request. + parent (:class:`str`): + Required. The project whose Uptime check configurations + are listed. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListUptimeCheckConfigsPager: + The protocol for the ``ListUptimeCheckConfigs`` + response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a uptime_service.ListUptimeCheckConfigsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, uptime_service.ListUptimeCheckConfigsRequest): + request = uptime_service.ListUptimeCheckConfigsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_uptime_check_configs + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListUptimeCheckConfigsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def get_uptime_check_config( + self, + request: uptime_service.GetUptimeCheckConfigRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> uptime.UptimeCheckConfig: + r"""Gets a single Uptime check configuration. 
+ + Args: + request (:class:`~.uptime_service.GetUptimeCheckConfigRequest`): + The request object. The protocol for the + `GetUptimeCheckConfig` request. + name (:class:`str`): + Required. The Uptime check configuration to retrieve. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.uptime.UptimeCheckConfig: + This message configures which + resources and services to monitor for + availability. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a uptime_service.GetUptimeCheckConfigRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, uptime_service.GetUptimeCheckConfigRequest): + request = uptime_service.GetUptimeCheckConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.get_uptime_check_config] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_uptime_check_config( + self, + request: uptime_service.CreateUptimeCheckConfigRequest = None, + *, + parent: str = None, + uptime_check_config: uptime.UptimeCheckConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> uptime.UptimeCheckConfig: + r"""Creates a new Uptime check configuration. + + Args: + request (:class:`~.uptime_service.CreateUptimeCheckConfigRequest`): + The request object. The protocol for the + `CreateUptimeCheckConfig` request. + parent (:class:`str`): + Required. The project in which to create the Uptime + check. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + uptime_check_config (:class:`~.uptime.UptimeCheckConfig`): + Required. The new Uptime check + configuration. + This corresponds to the ``uptime_check_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.uptime.UptimeCheckConfig: + This message configures which + resources and services to monitor for + availability. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, uptime_check_config]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a uptime_service.CreateUptimeCheckConfigRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, uptime_service.CreateUptimeCheckConfigRequest): + request = uptime_service.CreateUptimeCheckConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if uptime_check_config is not None: + request.uptime_check_config = uptime_check_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.create_uptime_check_config + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_uptime_check_config( + self, + request: uptime_service.UpdateUptimeCheckConfigRequest = None, + *, + uptime_check_config: uptime.UptimeCheckConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> uptime.UptimeCheckConfig: + r"""Updates an Uptime check configuration. 
You can either replace + the entire configuration with a new one or replace only certain + fields in the current configuration by specifying the fields to + be updated via ``updateMask``. Returns the updated + configuration. + + Args: + request (:class:`~.uptime_service.UpdateUptimeCheckConfigRequest`): + The request object. The protocol for the + `UpdateUptimeCheckConfig` request. + uptime_check_config (:class:`~.uptime.UptimeCheckConfig`): + Required. If an ``updateMask`` has been specified, this + field gives the values for the set of fields mentioned + in the ``updateMask``. If an ``updateMask`` has not been + given, this Uptime check configuration replaces the + current configuration. If a field is mentioned in + ``updateMask`` but the corresonding field is omitted in + this partial Uptime check configuration, it has the + effect of deleting/clearing the field from the + configuration on the server. + + The following fields can be updated: ``display_name``, + ``http_check``, ``tcp_check``, ``timeout``, + ``content_matchers``, and ``selected_regions``. + This corresponds to the ``uptime_check_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.uptime.UptimeCheckConfig: + This message configures which + resources and services to monitor for + availability. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([uptime_check_config]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a uptime_service.UpdateUptimeCheckConfigRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, uptime_service.UpdateUptimeCheckConfigRequest): + request = uptime_service.UpdateUptimeCheckConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if uptime_check_config is not None: + request.uptime_check_config = uptime_check_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.update_uptime_check_config + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("uptime_check_config.name", request.uptime_check_config.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_uptime_check_config( + self, + request: uptime_service.DeleteUptimeCheckConfigRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an Uptime check configuration. Note that this + method will fail if the Uptime check configuration is + referenced by an alert policy or other dependent configs + that would be rendered invalid by the deletion. + + Args: + request (:class:`~.uptime_service.DeleteUptimeCheckConfigRequest`): + The request object. 
The protocol for the + `DeleteUptimeCheckConfig` request. + name (:class:`str`): + Required. The Uptime check configuration to delete. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a uptime_service.DeleteUptimeCheckConfigRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, uptime_service.DeleteUptimeCheckConfigRequest): + request = uptime_service.DeleteUptimeCheckConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.delete_uptime_check_config + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def list_uptime_check_ips( + self, + request: uptime_service.ListUptimeCheckIpsRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListUptimeCheckIpsPager: + r"""Returns the list of IP addresses that checkers run + from + + Args: + request (:class:`~.uptime_service.ListUptimeCheckIpsRequest`): + The request object. The protocol for the + `ListUptimeCheckIps` request. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListUptimeCheckIpsPager: + The protocol for the ``ListUptimeCheckIps`` response. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a uptime_service.ListUptimeCheckIpsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, uptime_service.ListUptimeCheckIpsRequest): + request = uptime_service.ListUptimeCheckIpsRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_uptime_check_ips] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListUptimeCheckIpsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-monitoring", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("UptimeCheckServiceClient",) diff --git a/google/cloud/monitoring_v3/services/uptime_check_service/pagers.py b/google/cloud/monitoring_v3/services/uptime_check_service/pagers.py new file mode 100644 index 00000000..6d370226 --- /dev/null +++ b/google/cloud/monitoring_v3/services/uptime_check_service/pagers.py @@ -0,0 +1,279 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.monitoring_v3.types import uptime +from google.cloud.monitoring_v3.types import uptime_service + + +class ListUptimeCheckConfigsPager: + """A pager for iterating through ``list_uptime_check_configs`` requests. + + This class thinly wraps an initial + :class:`~.uptime_service.ListUptimeCheckConfigsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``uptime_check_configs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListUptimeCheckConfigs`` requests and continue to iterate + through the ``uptime_check_configs`` field on the + corresponding responses. 
+ + All the usual :class:`~.uptime_service.ListUptimeCheckConfigsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., uptime_service.ListUptimeCheckConfigsResponse], + request: uptime_service.ListUptimeCheckConfigsRequest, + response: uptime_service.ListUptimeCheckConfigsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.uptime_service.ListUptimeCheckConfigsRequest`): + The initial request object. + response (:class:`~.uptime_service.ListUptimeCheckConfigsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = uptime_service.ListUptimeCheckConfigsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[uptime_service.ListUptimeCheckConfigsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[uptime.UptimeCheckConfig]: + for page in self.pages: + yield from page.uptime_check_configs + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListUptimeCheckConfigsAsyncPager: + """A pager for iterating through ``list_uptime_check_configs`` requests. 
+ + This class thinly wraps an initial + :class:`~.uptime_service.ListUptimeCheckConfigsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``uptime_check_configs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListUptimeCheckConfigs`` requests and continue to iterate + through the ``uptime_check_configs`` field on the + corresponding responses. + + All the usual :class:`~.uptime_service.ListUptimeCheckConfigsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[uptime_service.ListUptimeCheckConfigsResponse]], + request: uptime_service.ListUptimeCheckConfigsRequest, + response: uptime_service.ListUptimeCheckConfigsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.uptime_service.ListUptimeCheckConfigsRequest`): + The initial request object. + response (:class:`~.uptime_service.ListUptimeCheckConfigsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = uptime_service.ListUptimeCheckConfigsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[uptime_service.ListUptimeCheckConfigsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[uptime.UptimeCheckConfig]: + async def async_generator(): + async for page in self.pages: + for response in page.uptime_check_configs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListUptimeCheckIpsPager: + """A pager for iterating through ``list_uptime_check_ips`` requests. + + This class thinly wraps an initial + :class:`~.uptime_service.ListUptimeCheckIpsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``uptime_check_ips`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListUptimeCheckIps`` requests and continue to iterate + through the ``uptime_check_ips`` field on the + corresponding responses. + + All the usual :class:`~.uptime_service.ListUptimeCheckIpsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., uptime_service.ListUptimeCheckIpsResponse], + request: uptime_service.ListUptimeCheckIpsRequest, + response: uptime_service.ListUptimeCheckIpsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.uptime_service.ListUptimeCheckIpsRequest`): + The initial request object. + response (:class:`~.uptime_service.ListUptimeCheckIpsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = uptime_service.ListUptimeCheckIpsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[uptime_service.ListUptimeCheckIpsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[uptime.UptimeCheckIp]: + for page in self.pages: + yield from page.uptime_check_ips + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListUptimeCheckIpsAsyncPager: + """A pager for iterating through ``list_uptime_check_ips`` requests. + + This class thinly wraps an initial + :class:`~.uptime_service.ListUptimeCheckIpsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``uptime_check_ips`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListUptimeCheckIps`` requests and continue to iterate + through the ``uptime_check_ips`` field on the + corresponding responses. + + All the usual :class:`~.uptime_service.ListUptimeCheckIpsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., Awaitable[uptime_service.ListUptimeCheckIpsResponse]], + request: uptime_service.ListUptimeCheckIpsRequest, + response: uptime_service.ListUptimeCheckIpsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.uptime_service.ListUptimeCheckIpsRequest`): + The initial request object. + response (:class:`~.uptime_service.ListUptimeCheckIpsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = uptime_service.ListUptimeCheckIpsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[uptime_service.ListUptimeCheckIpsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[uptime.UptimeCheckIp]: + async def async_generator(): + async for page in self.pages: + for response in page.uptime_check_ips: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/monitoring_v3/services/uptime_check_service/transports/__init__.py b/google/cloud/monitoring_v3/services/uptime_check_service/transports/__init__.py new file mode 100644 index 00000000..5f21c445 --- /dev/null +++ b/google/cloud/monitoring_v3/services/uptime_check_service/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under 
the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import UptimeCheckServiceTransport +from .grpc import UptimeCheckServiceGrpcTransport +from .grpc_asyncio import UptimeCheckServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[UptimeCheckServiceTransport]] +_transport_registry["grpc"] = UptimeCheckServiceGrpcTransport +_transport_registry["grpc_asyncio"] = UptimeCheckServiceGrpcAsyncIOTransport + + +__all__ = ( + "UptimeCheckServiceTransport", + "UptimeCheckServiceGrpcTransport", + "UptimeCheckServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/monitoring_v3/services/uptime_check_service/transports/base.py b/google/cloud/monitoring_v3/services/uptime_check_service/transports/base.py new file mode 100644 index 00000000..4ab62743 --- /dev/null +++ b/google/cloud/monitoring_v3/services/uptime_check_service/transports/base.py @@ -0,0 +1,246 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
import abc
import typing
import pkg_resources

from google import auth  # type: ignore
from google.api_core import exceptions  # type: ignore
from google.api_core import gapic_v1  # type: ignore
from google.api_core import retry as retries  # type: ignore
from google.auth import credentials  # type: ignore

from google.cloud.monitoring_v3.types import uptime
from google.cloud.monitoring_v3.types import uptime_service
from google.protobuf import empty_pb2 as empty  # type: ignore


try:
    # Report the installed distribution's version in the user-agent. If the
    # package was not pip-installed (e.g. generated code run from source),
    # fall back to a ClientInfo without a gapic_version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-monitoring",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class UptimeCheckServiceTransport(abc.ABC):
    """Abstract transport class for UptimeCheckService.

    Concrete subclasses (gRPC, gRPC AsyncIO) implement the RPC properties
    below; this base handles credential resolution and method wrapping.
    """

    # Default OAuth scopes requested when credentials are resolved from the
    # environment or a credentials file.
    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/monitoring",
        "https://www.googleapis.com/auth/monitoring.read",
    )

    def __init__(
        self,
        *,
        host: str = "monitoring.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: typing.Optional[str] = None,
        scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
        quota_project_id: typing.Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = auth.load_credentials_from_file(
                credentials_file, scopes=scopes, quota_project_id=quota_project_id
            )

        elif credentials is None:
            credentials, _ = auth.default(
                scopes=scopes, quota_project_id=quota_project_id
            )

        # Save the credentials.
        self._credentials = credentials

        # Lifted into its own function so it can be stubbed out during tests.
        self._prep_wrapped_messages(client_info)

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods. Retry/timeout values below come from
        # the service configuration; only idempotent methods get a retry policy.
        self._wrapped_methods = {
            self.list_uptime_check_configs: gapic_v1.method.wrap_method(
                self.list_uptime_check_configs,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=30.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
                    ),
                ),
                default_timeout=30.0,
                client_info=client_info,
            ),
            self.get_uptime_check_config: gapic_v1.method.wrap_method(
                self.get_uptime_check_config,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=30.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
                    ),
                ),
                default_timeout=30.0,
                client_info=client_info,
            ),
            # Create/update are not idempotent, so no default retry is applied.
            self.create_uptime_check_config: gapic_v1.method.wrap_method(
                self.create_uptime_check_config,
                default_timeout=30.0,
                client_info=client_info,
            ),
            self.update_uptime_check_config: gapic_v1.method.wrap_method(
                self.update_uptime_check_config,
                default_timeout=30.0,
                client_info=client_info,
            ),
            self.delete_uptime_check_config: gapic_v1.method.wrap_method(
                self.delete_uptime_check_config,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=30.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
                    ),
                ),
                default_timeout=30.0,
                client_info=client_info,
            ),
            self.list_uptime_check_ips: gapic_v1.method.wrap_method(
                self.list_uptime_check_ips,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=30.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
                    ),
                ),
                default_timeout=30.0,
                client_info=client_info,
            ),
        }

    # Each property below is an abstract RPC hook: concrete transports return
    # a callable (sync or awaitable) bound to the underlying channel.
    @property
    def list_uptime_check_configs(
        self,
    ) -> typing.Callable[
        [uptime_service.ListUptimeCheckConfigsRequest],
        typing.Union[
            uptime_service.ListUptimeCheckConfigsResponse,
            typing.Awaitable[uptime_service.ListUptimeCheckConfigsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_uptime_check_config(
        self,
    ) -> typing.Callable[
        [uptime_service.GetUptimeCheckConfigRequest],
        typing.Union[
            uptime.UptimeCheckConfig, typing.Awaitable[uptime.UptimeCheckConfig]
        ],
    ]:
        raise NotImplementedError()

    @property
    def create_uptime_check_config(
        self,
    ) -> typing.Callable[
        [uptime_service.CreateUptimeCheckConfigRequest],
        typing.Union[
            uptime.UptimeCheckConfig, typing.Awaitable[uptime.UptimeCheckConfig]
        ],
    ]:
        raise NotImplementedError()

    @property
    def update_uptime_check_config(
        self,
    ) -> typing.Callable[
        [uptime_service.UpdateUptimeCheckConfigRequest],
        typing.Union[
            uptime.UptimeCheckConfig, typing.Awaitable[uptime.UptimeCheckConfig]
        ],
    ]:
        raise NotImplementedError()

    @property
    def delete_uptime_check_config(
        self,
    ) -> typing.Callable[
        [uptime_service.DeleteUptimeCheckConfigRequest],
        typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
    ]:
        raise NotImplementedError()

    @property
    def list_uptime_check_ips(
        self,
    ) -> typing.Callable[
        [uptime_service.ListUptimeCheckIpsRequest],
        typing.Union[
            uptime_service.ListUptimeCheckIpsResponse,
            typing.Awaitable[uptime_service.ListUptimeCheckIpsResponse],
        ],
    ]:
        raise NotImplementedError()


__all__ = ("UptimeCheckServiceTransport",)
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple

from google.api_core import grpc_helpers  # type: ignore
from google.api_core import gapic_v1  # type: ignore
from google import auth  # type: ignore
from google.auth import credentials  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore

import grpc  # type: ignore

from google.cloud.monitoring_v3.types import uptime
from google.cloud.monitoring_v3.types import uptime_service
from google.protobuf import empty_pb2 as empty  # type: ignore

from .base import UptimeCheckServiceTransport, DEFAULT_CLIENT_INFO


class UptimeCheckServiceGrpcTransport(UptimeCheckServiceTransport):
    """gRPC backend transport for UptimeCheckService.

    The UptimeCheckService API is used to manage (list, create, delete,
    edit) Uptime check configurations in the Stackdriver Monitoring
    product. An Uptime check is a piece of configuration that determines
    which resources and services to monitor for availability. These
    configurations can also be configured interactively by navigating to
    the [Cloud Console] (http://console.cloud.google.com), selecting the
    appropriate project, clicking on "Monitoring" on the left-hand side
    to navigate to Stackdriver, and then clicking on "Uptime".

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    # Cache of per-RPC stub callables, keyed by RPC name (see properties below).
    _stubs: Dict[str, Callable]

    def __init__(
        self,
        *,
        host: str = "monitoring.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        if channel:
            # Sanity check: Ensure that channel and credentials are not both
            # provided.
            credentials = False

            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
        elif api_mtls_endpoint:
            warnings.warn(
                "api_mtls_endpoint and client_cert_source are deprecated",
                DeprecationWarning,
            )

            host = (
                api_mtls_endpoint
                if ":" in api_mtls_endpoint
                else api_mtls_endpoint + ":443"
            )

            if credentials is None:
                credentials, _ = auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )

            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                ssl_credentials = SslCredentials().ssl_credentials

            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
            )
        else:
            host = host if ":" in host else host + ":443"

            if credentials is None:
                credentials, _ = auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )

            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_channel_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
            )

        self._stubs = {}  # type: Dict[str, Callable]

        # Run the base constructor.
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes or self.AUTH_SCOPES,
            quota_project_id=quota_project_id,
            client_info=client_info,
        )

    @classmethod
    def create_channel(
        cls,
        host: str = "monitoring.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        scopes = scopes or cls.AUTH_SCOPES
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            **kwargs,
        )

    @property
    def grpc_channel(self) -> grpc.Channel:
        """Create the channel designed to connect to this service.

        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel

    @property
    def list_uptime_check_configs(
        self,
    ) -> Callable[
        [uptime_service.ListUptimeCheckConfigsRequest],
        uptime_service.ListUptimeCheckConfigsResponse,
    ]:
        r"""Return a callable for the list uptime check configs method over gRPC.

        Lists the existing valid Uptime check configurations
        for the project (leaving out any invalid
        configurations).

        Returns:
            Callable[[~.ListUptimeCheckConfigsRequest],
                    ~.ListUptimeCheckConfigsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_uptime_check_configs" not in self._stubs:
            self._stubs["list_uptime_check_configs"] = self.grpc_channel.unary_unary(
                "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs",
                request_serializer=uptime_service.ListUptimeCheckConfigsRequest.serialize,
                response_deserializer=uptime_service.ListUptimeCheckConfigsResponse.deserialize,
            )
        return self._stubs["list_uptime_check_configs"]

    @property
    def get_uptime_check_config(
        self,
    ) -> Callable[
        [uptime_service.GetUptimeCheckConfigRequest], uptime.UptimeCheckConfig
    ]:
        r"""Return a callable for the get uptime check config method over gRPC.

        Gets a single Uptime check configuration.

        Returns:
            Callable[[~.GetUptimeCheckConfigRequest],
                    ~.UptimeCheckConfig]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_uptime_check_config" not in self._stubs:
            self._stubs["get_uptime_check_config"] = self.grpc_channel.unary_unary(
                "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig",
                request_serializer=uptime_service.GetUptimeCheckConfigRequest.serialize,
                response_deserializer=uptime.UptimeCheckConfig.deserialize,
            )
        return self._stubs["get_uptime_check_config"]

    @property
    def create_uptime_check_config(
        self,
    ) -> Callable[
        [uptime_service.CreateUptimeCheckConfigRequest], uptime.UptimeCheckConfig
    ]:
        r"""Return a callable for the create uptime check config method over gRPC.

        Creates a new Uptime check configuration.

        Returns:
            Callable[[~.CreateUptimeCheckConfigRequest],
                    ~.UptimeCheckConfig]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "create_uptime_check_config" not in self._stubs:
            self._stubs["create_uptime_check_config"] = self.grpc_channel.unary_unary(
                "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig",
                request_serializer=uptime_service.CreateUptimeCheckConfigRequest.serialize,
                response_deserializer=uptime.UptimeCheckConfig.deserialize,
            )
        return self._stubs["create_uptime_check_config"]

    @property
    def update_uptime_check_config(
        self,
    ) -> Callable[
        [uptime_service.UpdateUptimeCheckConfigRequest], uptime.UptimeCheckConfig
    ]:
        r"""Return a callable for the update uptime check config method over gRPC.

        Updates an Uptime check configuration. You can either replace
        the entire configuration with a new one or replace only certain
        fields in the current configuration by specifying the fields to
        be updated via ``updateMask``. Returns the updated
        configuration.

        Returns:
            Callable[[~.UpdateUptimeCheckConfigRequest],
                    ~.UptimeCheckConfig]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "update_uptime_check_config" not in self._stubs:
            self._stubs["update_uptime_check_config"] = self.grpc_channel.unary_unary(
                "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig",
                request_serializer=uptime_service.UpdateUptimeCheckConfigRequest.serialize,
                response_deserializer=uptime.UptimeCheckConfig.deserialize,
            )
        return self._stubs["update_uptime_check_config"]

    @property
    def delete_uptime_check_config(
        self,
    ) -> Callable[[uptime_service.DeleteUptimeCheckConfigRequest], empty.Empty]:
        r"""Return a callable for the delete uptime check config method over gRPC.

        Deletes an Uptime check configuration. Note that this
        method will fail if the Uptime check configuration is
        referenced by an alert policy or other dependent configs
        that would be rendered invalid by the deletion.

        Returns:
            Callable[[~.DeleteUptimeCheckConfigRequest],
                    ~.Empty]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "delete_uptime_check_config" not in self._stubs:
            self._stubs["delete_uptime_check_config"] = self.grpc_channel.unary_unary(
                "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig",
                request_serializer=uptime_service.DeleteUptimeCheckConfigRequest.serialize,
                # Empty is a plain protobuf message (not a proto-plus type),
                # so it deserializes via FromString rather than .deserialize.
                response_deserializer=empty.Empty.FromString,
            )
        return self._stubs["delete_uptime_check_config"]

    @property
    def list_uptime_check_ips(
        self,
    ) -> Callable[
        [uptime_service.ListUptimeCheckIpsRequest],
        uptime_service.ListUptimeCheckIpsResponse,
    ]:
        r"""Return a callable for the list uptime check ips method over gRPC.

        Returns the list of IP addresses that checkers run
        from.

        Returns:
            Callable[[~.ListUptimeCheckIpsRequest],
                    ~.ListUptimeCheckIpsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_uptime_check_ips" not in self._stubs:
            self._stubs["list_uptime_check_ips"] = self.grpc_channel.unary_unary(
                "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps",
                request_serializer=uptime_service.ListUptimeCheckIpsRequest.serialize,
                response_deserializer=uptime_service.ListUptimeCheckIpsResponse.deserialize,
            )
        return self._stubs["list_uptime_check_ips"]


__all__ = ("UptimeCheckServiceGrpcTransport",)
+# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.monitoring_v3.types import uptime +from google.cloud.monitoring_v3.types import uptime_service +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import UptimeCheckServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import UptimeCheckServiceGrpcTransport + + +class UptimeCheckServiceGrpcAsyncIOTransport(UptimeCheckServiceTransport): + """gRPC AsyncIO backend transport for UptimeCheckService. + + The UptimeCheckService API is used to manage (list, create, delete, + edit) Uptime check configurations in the Stackdriver Monitoring + product. An Uptime check is a piece of configuration that determines + which resources and services to monitor for availability. These + configurations can also be configured interactively by navigating to + the [Cloud Console] (http://console.cloud.google.com), selecting the + appropriate project, clicking on "Monitoring" on the left-hand side + to navigate to Stackdriver, and then clicking on "Uptime". + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "monitoring.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. 
+ self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def list_uptime_check_configs( + self, + ) -> Callable[ + [uptime_service.ListUptimeCheckConfigsRequest], + Awaitable[uptime_service.ListUptimeCheckConfigsResponse], + ]: + r"""Return a callable for the list uptime check configs method over gRPC. + + Lists the existing valid Uptime check configurations + for the project (leaving out any invalid + configurations). + + Returns: + Callable[[~.ListUptimeCheckConfigsRequest], + Awaitable[~.ListUptimeCheckConfigsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_uptime_check_configs" not in self._stubs: + self._stubs["list_uptime_check_configs"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", + request_serializer=uptime_service.ListUptimeCheckConfigsRequest.serialize, + response_deserializer=uptime_service.ListUptimeCheckConfigsResponse.deserialize, + ) + return self._stubs["list_uptime_check_configs"] + + @property + def get_uptime_check_config( + self, + ) -> Callable[ + [uptime_service.GetUptimeCheckConfigRequest], + Awaitable[uptime.UptimeCheckConfig], + ]: + r"""Return a callable for the get uptime check config method over gRPC. + + Gets a single Uptime check configuration. + + Returns: + Callable[[~.GetUptimeCheckConfigRequest], + Awaitable[~.UptimeCheckConfig]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_uptime_check_config" not in self._stubs: + self._stubs["get_uptime_check_config"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", + request_serializer=uptime_service.GetUptimeCheckConfigRequest.serialize, + response_deserializer=uptime.UptimeCheckConfig.deserialize, + ) + return self._stubs["get_uptime_check_config"] + + @property + def create_uptime_check_config( + self, + ) -> Callable[ + [uptime_service.CreateUptimeCheckConfigRequest], + Awaitable[uptime.UptimeCheckConfig], + ]: + r"""Return a callable for the create uptime check config method over gRPC. + + Creates a new Uptime check configuration. 
+ + Returns: + Callable[[~.CreateUptimeCheckConfigRequest], + Awaitable[~.UptimeCheckConfig]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_uptime_check_config" not in self._stubs: + self._stubs["create_uptime_check_config"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", + request_serializer=uptime_service.CreateUptimeCheckConfigRequest.serialize, + response_deserializer=uptime.UptimeCheckConfig.deserialize, + ) + return self._stubs["create_uptime_check_config"] + + @property + def update_uptime_check_config( + self, + ) -> Callable[ + [uptime_service.UpdateUptimeCheckConfigRequest], + Awaitable[uptime.UptimeCheckConfig], + ]: + r"""Return a callable for the update uptime check config method over gRPC. + + Updates an Uptime check configuration. You can either replace + the entire configuration with a new one or replace only certain + fields in the current configuration by specifying the fields to + be updated via ``updateMask``. Returns the updated + configuration. + + Returns: + Callable[[~.UpdateUptimeCheckConfigRequest], + Awaitable[~.UptimeCheckConfig]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_uptime_check_config" not in self._stubs: + self._stubs["update_uptime_check_config"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", + request_serializer=uptime_service.UpdateUptimeCheckConfigRequest.serialize, + response_deserializer=uptime.UptimeCheckConfig.deserialize, + ) + return self._stubs["update_uptime_check_config"] + + @property + def delete_uptime_check_config( + self, + ) -> Callable[ + [uptime_service.DeleteUptimeCheckConfigRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the delete uptime check config method over gRPC. + + Deletes an Uptime check configuration. Note that this + method will fail if the Uptime check configuration is + referenced by an alert policy or other dependent configs + that would be rendered invalid by the deletion. + + Returns: + Callable[[~.DeleteUptimeCheckConfigRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_uptime_check_config" not in self._stubs: + self._stubs["delete_uptime_check_config"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", + request_serializer=uptime_service.DeleteUptimeCheckConfigRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_uptime_check_config"] + + @property + def list_uptime_check_ips( + self, + ) -> Callable[ + [uptime_service.ListUptimeCheckIpsRequest], + Awaitable[uptime_service.ListUptimeCheckIpsResponse], + ]: + r"""Return a callable for the list uptime check ips method over gRPC. 
+ + Returns the list of IP addresses that checkers run + from + + Returns: + Callable[[~.ListUptimeCheckIpsRequest], + Awaitable[~.ListUptimeCheckIpsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_uptime_check_ips" not in self._stubs: + self._stubs["list_uptime_check_ips"] = self.grpc_channel.unary_unary( + "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", + request_serializer=uptime_service.ListUptimeCheckIpsRequest.serialize, + response_deserializer=uptime_service.ListUptimeCheckIpsResponse.deserialize, + ) + return self._stubs["list_uptime_check_ips"] + + +__all__ = ("UptimeCheckServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/monitoring_v3/types.py b/google/cloud/monitoring_v3/types.py deleted file mode 100644 index 81c88a5c..00000000 --- a/google/cloud/monitoring_v3/types.py +++ /dev/null @@ -1,100 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.api import distribution_pb2 -from google.api import label_pb2 -from google.api import metric_pb2 as api_metric_pb2 -from google.api import monitored_resource_pb2 -from google.cloud.monitoring_v3.proto import alert_pb2 -from google.cloud.monitoring_v3.proto import alert_service_pb2 -from google.cloud.monitoring_v3.proto import common_pb2 -from google.cloud.monitoring_v3.proto import dropped_labels_pb2 -from google.cloud.monitoring_v3.proto import group_pb2 -from google.cloud.monitoring_v3.proto import group_service_pb2 -from google.cloud.monitoring_v3.proto import metric_pb2 as proto_metric_pb2 -from google.cloud.monitoring_v3.proto import metric_service_pb2 -from google.cloud.monitoring_v3.proto import mutation_record_pb2 -from google.cloud.monitoring_v3.proto import notification_pb2 -from google.cloud.monitoring_v3.proto import notification_service_pb2 -from google.cloud.monitoring_v3.proto import service_pb2 -from google.cloud.monitoring_v3.proto import service_service_pb2 -from google.cloud.monitoring_v3.proto import span_context_pb2 -from google.cloud.monitoring_v3.proto import uptime_pb2 -from google.cloud.monitoring_v3.proto import uptime_service_pb2 -from google.protobuf import any_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -from google.protobuf import struct_pb2 -from google.protobuf import timestamp_pb2 -from google.protobuf import wrappers_pb2 -from google.rpc import status_pb2 - - -_shared_modules = [ - distribution_pb2, - label_pb2, - api_metric_pb2, - monitored_resource_pb2, - any_pb2, - duration_pb2, - empty_pb2, - field_mask_pb2, - struct_pb2, - timestamp_pb2, - wrappers_pb2, - status_pb2, -] - -_local_modules = [ - alert_pb2, - alert_service_pb2, - common_pb2, - dropped_labels_pb2, - group_pb2, - group_service_pb2, - proto_metric_pb2, 
- metric_service_pb2, - mutation_record_pb2, - notification_pb2, - notification_service_pb2, - service_pb2, - service_service_pb2, - span_context_pb2, - uptime_pb2, - uptime_service_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.monitoring_v3.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/google/cloud/monitoring_v3/types/__init__.py b/google/cloud/monitoring_v3/types/__init__.py new file mode 100644 index 00000000..51d7a273 --- /dev/null +++ b/google/cloud/monitoring_v3/types/__init__.py @@ -0,0 +1,227 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .common import ( + TypedValue, + TimeInterval, + Aggregation, +) +from .mutation_record import MutationRecord +from .alert import AlertPolicy +from .alert_service import ( + CreateAlertPolicyRequest, + GetAlertPolicyRequest, + ListAlertPoliciesRequest, + ListAlertPoliciesResponse, + UpdateAlertPolicyRequest, + DeleteAlertPolicyRequest, +) +from .dropped_labels import DroppedLabels +from .group import Group +from .group_service import ( + ListGroupsRequest, + ListGroupsResponse, + GetGroupRequest, + CreateGroupRequest, + UpdateGroupRequest, + DeleteGroupRequest, + ListGroupMembersRequest, + ListGroupMembersResponse, +) +from .metric import ( + Point, + TimeSeries, + TimeSeriesDescriptor, + TimeSeriesData, + LabelValue, + QueryError, + TextLocator, +) +from .metric_service import ( + ListMonitoredResourceDescriptorsRequest, + ListMonitoredResourceDescriptorsResponse, + GetMonitoredResourceDescriptorRequest, + ListMetricDescriptorsRequest, + ListMetricDescriptorsResponse, + GetMetricDescriptorRequest, + CreateMetricDescriptorRequest, + DeleteMetricDescriptorRequest, + ListTimeSeriesRequest, + ListTimeSeriesResponse, + CreateTimeSeriesRequest, + CreateTimeSeriesError, + CreateTimeSeriesSummary, + QueryTimeSeriesRequest, + QueryTimeSeriesResponse, + QueryErrorList, +) +from .notification import ( + NotificationChannelDescriptor, + NotificationChannel, +) +from .notification_service import ( + ListNotificationChannelDescriptorsRequest, + ListNotificationChannelDescriptorsResponse, + GetNotificationChannelDescriptorRequest, + CreateNotificationChannelRequest, + ListNotificationChannelsRequest, + ListNotificationChannelsResponse, + GetNotificationChannelRequest, + UpdateNotificationChannelRequest, + DeleteNotificationChannelRequest, + SendNotificationChannelVerificationCodeRequest, + GetNotificationChannelVerificationCodeRequest, + GetNotificationChannelVerificationCodeResponse, + VerifyNotificationChannelRequest, +) +from .service import ( + Service, + 
ServiceLevelObjective, + ServiceLevelIndicator, + BasicSli, + Range, + RequestBasedSli, + TimeSeriesRatio, + DistributionCut, + WindowsBasedSli, +) +from .service_service import ( + CreateServiceRequest, + GetServiceRequest, + ListServicesRequest, + ListServicesResponse, + UpdateServiceRequest, + DeleteServiceRequest, + CreateServiceLevelObjectiveRequest, + GetServiceLevelObjectiveRequest, + ListServiceLevelObjectivesRequest, + ListServiceLevelObjectivesResponse, + UpdateServiceLevelObjectiveRequest, + DeleteServiceLevelObjectiveRequest, +) +from .span_context import SpanContext +from .uptime import ( + InternalChecker, + UptimeCheckConfig, + UptimeCheckIp, +) +from .uptime_service import ( + ListUptimeCheckConfigsRequest, + ListUptimeCheckConfigsResponse, + GetUptimeCheckConfigRequest, + CreateUptimeCheckConfigRequest, + UpdateUptimeCheckConfigRequest, + DeleteUptimeCheckConfigRequest, + ListUptimeCheckIpsRequest, + ListUptimeCheckIpsResponse, +) + + +__all__ = ( + "TypedValue", + "TimeInterval", + "Aggregation", + "MutationRecord", + "AlertPolicy", + "CreateAlertPolicyRequest", + "GetAlertPolicyRequest", + "ListAlertPoliciesRequest", + "ListAlertPoliciesResponse", + "UpdateAlertPolicyRequest", + "DeleteAlertPolicyRequest", + "DroppedLabels", + "Group", + "ListGroupsRequest", + "ListGroupsResponse", + "GetGroupRequest", + "CreateGroupRequest", + "UpdateGroupRequest", + "DeleteGroupRequest", + "ListGroupMembersRequest", + "ListGroupMembersResponse", + "Point", + "TimeSeries", + "TimeSeriesDescriptor", + "TimeSeriesData", + "LabelValue", + "QueryError", + "TextLocator", + "ListMonitoredResourceDescriptorsRequest", + "ListMonitoredResourceDescriptorsResponse", + "GetMonitoredResourceDescriptorRequest", + "ListMetricDescriptorsRequest", + "ListMetricDescriptorsResponse", + "GetMetricDescriptorRequest", + "CreateMetricDescriptorRequest", + "DeleteMetricDescriptorRequest", + "ListTimeSeriesRequest", + "ListTimeSeriesResponse", + "CreateTimeSeriesRequest", + 
"CreateTimeSeriesError", + "CreateTimeSeriesSummary", + "QueryTimeSeriesRequest", + "QueryTimeSeriesResponse", + "QueryErrorList", + "NotificationChannelDescriptor", + "NotificationChannel", + "ListNotificationChannelDescriptorsRequest", + "ListNotificationChannelDescriptorsResponse", + "GetNotificationChannelDescriptorRequest", + "CreateNotificationChannelRequest", + "ListNotificationChannelsRequest", + "ListNotificationChannelsResponse", + "GetNotificationChannelRequest", + "UpdateNotificationChannelRequest", + "DeleteNotificationChannelRequest", + "SendNotificationChannelVerificationCodeRequest", + "GetNotificationChannelVerificationCodeRequest", + "GetNotificationChannelVerificationCodeResponse", + "VerifyNotificationChannelRequest", + "Service", + "ServiceLevelObjective", + "ServiceLevelIndicator", + "BasicSli", + "Range", + "RequestBasedSli", + "TimeSeriesRatio", + "DistributionCut", + "WindowsBasedSli", + "CreateServiceRequest", + "GetServiceRequest", + "ListServicesRequest", + "ListServicesResponse", + "UpdateServiceRequest", + "DeleteServiceRequest", + "CreateServiceLevelObjectiveRequest", + "GetServiceLevelObjectiveRequest", + "ListServiceLevelObjectivesRequest", + "ListServiceLevelObjectivesResponse", + "UpdateServiceLevelObjectiveRequest", + "DeleteServiceLevelObjectiveRequest", + "SpanContext", + "InternalChecker", + "UptimeCheckConfig", + "UptimeCheckIp", + "ListUptimeCheckConfigsRequest", + "ListUptimeCheckConfigsResponse", + "GetUptimeCheckConfigRequest", + "CreateUptimeCheckConfigRequest", + "UpdateUptimeCheckConfigRequest", + "DeleteUptimeCheckConfigRequest", + "ListUptimeCheckIpsRequest", + "ListUptimeCheckIpsResponse", +) diff --git a/google/cloud/monitoring_v3/types/alert.py b/google/cloud/monitoring_v3/types/alert.py new file mode 100644 index 00000000..e9a2093e --- /dev/null +++ b/google/cloud/monitoring_v3/types/alert.py @@ -0,0 +1,448 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 
2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.monitoring_v3.types import common +from google.cloud.monitoring_v3.types import mutation_record as gm_mutation_record +from google.protobuf import duration_pb2 as gp_duration # type: ignore +from google.protobuf import wrappers_pb2 as wrappers # type: ignore +from google.rpc import status_pb2 as status # type: ignore + + +__protobuf__ = proto.module(package="google.monitoring.v3", manifest={"AlertPolicy",},) + + +class AlertPolicy(proto.Message): + r"""A description of the conditions under which some aspect of your + system is considered to be "unhealthy" and the ways to notify people + or services about this state. For an overview of alert policies, see + `Introduction to + Alerting `__. + + Attributes: + name (str): + Required if the policy exists. The resource name for this + policy. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + + ``[ALERT_POLICY_ID]`` is assigned by Stackdriver Monitoring + when the policy is created. When calling the + [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] + method, do not include the ``name`` field in the alerting + policy passed as part of the request. + display_name (str): + A short name or phrase used to identify the + policy in dashboards, notifications, and + incidents. To avoid confusion, don't use the + same display name for multiple policies in the + same project. 
The name is limited to 512 Unicode + characters. + documentation (~.alert.AlertPolicy.Documentation): + Documentation that is included with + notifications and incidents related to this + policy. Best practice is for the documentation + to include information to help responders + understand, mitigate, escalate, and correct the + underlying problems detected by the alerting + policy. Notification channels that have limited + capacity might not show this documentation. + user_labels (Sequence[~.alert.AlertPolicy.UserLabelsEntry]): + User-supplied key/value data to be used for organizing and + identifying the ``AlertPolicy`` objects. + + The field can contain up to 64 entries. Each key and value + is limited to 63 Unicode characters or 128 bytes, whichever + is smaller. Labels and values can contain only lowercase + letters, numerals, underscores, and dashes. Keys must begin + with a letter. + conditions (Sequence[~.alert.AlertPolicy.Condition]): + A list of conditions for the policy. The conditions are + combined by AND or OR according to the ``combiner`` field. + If the combined conditions evaluate to true, then an + incident is created. A policy can have from one to six + conditions. If ``condition_time_series_query_language`` is + present, it must be the only ``condition``. + combiner (~.alert.AlertPolicy.ConditionCombinerType): + How to combine the results of multiple conditions to + determine if an incident should be opened. If + ``condition_time_series_query_language`` is present, this + must be ``COMBINE_UNSPECIFIED``. + enabled (~.wrappers.BoolValue): + Whether or not the policy is enabled. On + write, the default interpretation if unset is + that the policy is enabled. On read, clients + should not make any assumption about the state + if it has not been populated. The field should + always be populated on List and Get operations, + unless a field projection has been specified + that strips it out. 
+ validity (~.status.Status): + Read-only description of how the alert policy + is invalid. OK if the alert policy is valid. If + not OK, the alert policy will not generate + incidents. + notification_channels (Sequence[str]): + Identifies the notification channels to which notifications + should be sent when incidents are opened or closed or when + new violations occur on an already opened incident. Each + element of this array corresponds to the ``name`` field in + each of the + [``NotificationChannel``][google.monitoring.v3.NotificationChannel] + objects that are returned from the + [``ListNotificationChannels``] + [google.monitoring.v3.NotificationChannelService.ListNotificationChannels] + method. The format of the entries in this field is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + creation_record (~.gm_mutation_record.MutationRecord): + A read-only record of the creation of the + alerting policy. If provided in a call to create + or update, this field will be ignored. + mutation_record (~.gm_mutation_record.MutationRecord): + A read-only record of the most recent change + to the alerting policy. If provided in a call to + create or update, this field will be ignored. + """ + + class ConditionCombinerType(proto.Enum): + r"""Operators for combining conditions.""" + COMBINE_UNSPECIFIED = 0 + AND = 1 + OR = 2 + AND_WITH_MATCHING_RESOURCE = 3 + + class Documentation(proto.Message): + r"""A content string and a MIME type that describes the content + string's format. + + Attributes: + content (str): + The text of the documentation, interpreted according to + ``mime_type``. The content may not exceed 8,192 Unicode + characters and may not exceed more than 10,240 bytes when + encoded in UTF-8 format, whichever is smaller. + mime_type (str): + The format of the ``content`` field. Presently, only the + value ``"text/markdown"`` is supported. See + `Markdown `__ for + more information. 
+ """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + class Condition(proto.Message): + r"""A condition is a true/false test that determines when an + alerting policy should open an incident. If a condition + evaluates to true, it signifies that something is wrong. + + Attributes: + name (str): + Required if the condition exists. The unique resource name + for this condition. Its format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] + + ``[CONDITION_ID]`` is assigned by Stackdriver Monitoring + when the condition is created as part of a new or updated + alerting policy. + + When calling the + [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] + method, do not include the ``name`` field in the conditions + of the requested alerting policy. Stackdriver Monitoring + creates the condition identifiers and includes them in the + new policy. + + When calling the + [alertPolicies.update][google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy] + method to update a policy, including a condition ``name`` + causes the existing condition to be updated. Conditions + without names are added to the updated policy. Existing + conditions are deleted if they are not updated. + + Best practice is to preserve ``[CONDITION_ID]`` if you make + only small changes, such as those to condition thresholds, + durations, or trigger values. Otherwise, treat the change as + a new condition and let the existing condition be deleted. + display_name (str): + A short name or phrase used to identify the + condition in dashboards, notifications, and + incidents. To avoid confusion, don't use the + same display name for multiple conditions in the + same policy. + condition_threshold (~.alert.AlertPolicy.Condition.MetricThreshold): + A condition that compares a time series + against a threshold. 
+ condition_absent (~.alert.AlertPolicy.Condition.MetricAbsence): + A condition that checks that a time series + continues to receive new data points. + """ + + class Trigger(proto.Message): + r"""Specifies how many time series must fail a predicate to trigger a + condition. If not specified, then a ``{count: 1}`` trigger is used. + + Attributes: + count (int): + The absolute number of time series that must + fail the predicate for the condition to be + triggered. + percent (float): + The percentage of time series that must fail + the predicate for the condition to be triggered. + """ + + count = proto.Field(proto.INT32, number=1, oneof="type") + + percent = proto.Field(proto.DOUBLE, number=2, oneof="type") + + class MetricThreshold(proto.Message): + r"""A condition type that compares a collection of time series + against a threshold. + + Attributes: + filter (str): + A + `filter `__ + that identifies which time series should be compared with + the threshold. + + The filter is similar to the one that is specified in the + ```ListTimeSeries`` + request `__ + (that call is useful to verify the time series that will be + retrieved / processed) and must specify the metric type and + optionally may contain restrictions on resource type, + resource labels, and metric labels. This field may not + exceed 2048 Unicode characters in length. + aggregations (Sequence[~.common.Aggregation]): + Specifies the alignment of data points in individual time + series as well as how to combine the retrieved time series + together (such as when aggregating multiple streams on each + resource to a single stream for each resource or when + aggregating streams across all members of a group of + resources). Multiple aggregations are applied in the order + specified. + + This field is similar to the one in the ```ListTimeSeries`` + request `__. + It is advisable to use the ``ListTimeSeries`` method when + debugging this field. 
+ denominator_filter (str): + A + `filter `__ + that identifies a time series that should be used as the + denominator of a ratio that will be compared with the + threshold. If a ``denominator_filter`` is specified, the + time series specified by the ``filter`` field will be used + as the numerator. + + The filter must specify the metric type and optionally may + contain restrictions on resource type, resource labels, and + metric labels. This field may not exceed 2048 Unicode + characters in length. + denominator_aggregations (Sequence[~.common.Aggregation]): + Specifies the alignment of data points in individual time + series selected by ``denominatorFilter`` as well as how to + combine the retrieved time series together (such as when + aggregating multiple streams on each resource to a single + stream for each resource or when aggregating streams across + all members of a group of resources). + + When computing ratios, the ``aggregations`` and + ``denominator_aggregations`` fields must use the same + alignment period and produce time series that have the same + periodicity and labels. + comparison (~.common.ComparisonType): + The comparison to apply between the time series (indicated + by ``filter`` and ``aggregation``) and the threshold + (indicated by ``threshold_value``). The comparison is + applied on each time series, with the time series on the + left-hand side and the threshold on the right-hand side. + + Only ``COMPARISON_LT`` and ``COMPARISON_GT`` are supported + currently. + threshold_value (float): + A value against which to compare the time + series. + duration (~.gp_duration.Duration): + The amount of time that a time series must violate the + threshold to be considered failing. Currently, only values + that are a multiple of a minute--e.g., 0, 60, 120, or 300 + seconds--are supported. If an invalid value is given, an + error will be returned. 
When choosing a duration, it is + useful to keep in mind the frequency of the underlying time + series data (which may also be affected by any alignments + specified in the ``aggregations`` field); a good duration is + long enough so that a single outlier does not generate + spurious alerts, but short enough that unhealthy states are + detected and alerted on quickly. + trigger (~.alert.AlertPolicy.Condition.Trigger): + The number/percent of time series for which the comparison + must hold in order for the condition to trigger. If + unspecified, then the condition will trigger if the + comparison is true for any of the time series that have been + identified by ``filter`` and ``aggregations``, or by the + ratio, if ``denominator_filter`` and + ``denominator_aggregations`` are specified. + """ + + filter = proto.Field(proto.STRING, number=2) + + aggregations = proto.RepeatedField( + proto.MESSAGE, number=8, message=common.Aggregation, + ) + + denominator_filter = proto.Field(proto.STRING, number=9) + + denominator_aggregations = proto.RepeatedField( + proto.MESSAGE, number=10, message=common.Aggregation, + ) + + comparison = proto.Field(proto.ENUM, number=4, enum=common.ComparisonType,) + + threshold_value = proto.Field(proto.DOUBLE, number=5) + + duration = proto.Field( + proto.MESSAGE, number=6, message=gp_duration.Duration, + ) + + trigger = proto.Field( + proto.MESSAGE, number=7, message="AlertPolicy.Condition.Trigger", + ) + + class MetricAbsence(proto.Message): + r"""A condition type that checks that monitored resources are reporting + data. The configuration defines a metric and a set of monitored + resources. The predicate is considered in violation when a time + series for the specified metric of a monitored resource does not + include any data in the specified ``duration``. + + Attributes: + filter (str): + A + `filter `__ + that identifies which time series should be compared with + the threshold. 
+ + The filter is similar to the one that is specified in the + ```ListTimeSeries`` + request `__ + (that call is useful to verify the time series that will be + retrieved / processed) and must specify the metric type and + optionally may contain restrictions on resource type, + resource labels, and metric labels. This field may not + exceed 2048 Unicode characters in length. + aggregations (Sequence[~.common.Aggregation]): + Specifies the alignment of data points in individual time + series as well as how to combine the retrieved time series + together (such as when aggregating multiple streams on each + resource to a single stream for each resource or when + aggregating streams across all members of a group of + resources). Multiple aggregations are applied in the order + specified. + + This field is similar to the one in the ```ListTimeSeries`` + request `__. + It is advisable to use the ``ListTimeSeries`` method when + debugging this field. + duration (~.gp_duration.Duration): + The amount of time that a time series must fail to report + new data to be considered failing. Currently, only values + that are a multiple of a minute--e.g. 60, 120, or 300 + seconds--are supported. If an invalid value is given, an + error will be returned. The ``Duration.nanos`` field is + ignored. + trigger (~.alert.AlertPolicy.Condition.Trigger): + The number/percent of time series for which the comparison + must hold in order for the condition to trigger. If + unspecified, then the condition will trigger if the + comparison is true for any of the time series that have been + identified by ``filter`` and ``aggregations``. 
+ """ + + filter = proto.Field(proto.STRING, number=1) + + aggregations = proto.RepeatedField( + proto.MESSAGE, number=5, message=common.Aggregation, + ) + + duration = proto.Field( + proto.MESSAGE, number=2, message=gp_duration.Duration, + ) + + trigger = proto.Field( + proto.MESSAGE, number=3, message="AlertPolicy.Condition.Trigger", + ) + + name = proto.Field(proto.STRING, number=12) + + display_name = proto.Field(proto.STRING, number=6) + + condition_threshold = proto.Field( + proto.MESSAGE, + number=1, + oneof="condition", + message="AlertPolicy.Condition.MetricThreshold", + ) + + condition_absent = proto.Field( + proto.MESSAGE, + number=2, + oneof="condition", + message="AlertPolicy.Condition.MetricAbsence", + ) + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + documentation = proto.Field(proto.MESSAGE, number=13, message=Documentation,) + + user_labels = proto.MapField(proto.STRING, proto.STRING, number=16) + + conditions = proto.RepeatedField(proto.MESSAGE, number=12, message=Condition,) + + combiner = proto.Field(proto.ENUM, number=6, enum=ConditionCombinerType,) + + enabled = proto.Field(proto.MESSAGE, number=17, message=wrappers.BoolValue,) + + validity = proto.Field(proto.MESSAGE, number=18, message=status.Status,) + + notification_channels = proto.RepeatedField(proto.STRING, number=14) + + creation_record = proto.Field( + proto.MESSAGE, number=10, message=gm_mutation_record.MutationRecord, + ) + + mutation_record = proto.Field( + proto.MESSAGE, number=11, message=gm_mutation_record.MutationRecord, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/monitoring_v3/types/alert_service.py b/google/cloud/monitoring_v3/types/alert_service.py new file mode 100644 index 00000000..38e5f65e --- /dev/null +++ b/google/cloud/monitoring_v3/types/alert_service.py @@ -0,0 +1,221 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.monitoring_v3.types import alert +from google.protobuf import field_mask_pb2 as field_mask # type: ignore + + +__protobuf__ = proto.module( + package="google.monitoring.v3", + manifest={ + "CreateAlertPolicyRequest", + "GetAlertPolicyRequest", + "ListAlertPoliciesRequest", + "ListAlertPoliciesResponse", + "UpdateAlertPolicyRequest", + "DeleteAlertPolicyRequest", + }, +) + + +class CreateAlertPolicyRequest(proto.Message): + r"""The protocol for the ``CreateAlertPolicy`` request. + + Attributes: + name (str): + Required. The project in which to create the alerting + policy. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + + Note that this field names the parent container in which the + alerting policy will be written, not the name of the created + policy. The alerting policy that is returned will have a + name that contains a normalized representation of this name + as a prefix but adds a suffix of the form + ``/alertPolicies/[ALERT_POLICY_ID]``, identifying the policy + in the container. + alert_policy (~.alert.AlertPolicy): + Required. The requested alerting policy. You should omit the + ``name`` field in this policy. The name will be returned in + the new policy, including a new ``[ALERT_POLICY_ID]`` value. 
+ """ + + name = proto.Field(proto.STRING, number=3) + + alert_policy = proto.Field(proto.MESSAGE, number=2, message=alert.AlertPolicy,) + + +class GetAlertPolicyRequest(proto.Message): + r"""The protocol for the ``GetAlertPolicy`` request. + + Attributes: + name (str): + Required. The alerting policy to retrieve. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + """ + + name = proto.Field(proto.STRING, number=3) + + +class ListAlertPoliciesRequest(proto.Message): + r"""The protocol for the ``ListAlertPolicies`` request. + + Attributes: + name (str): + Required. The project whose alert policies are to be listed. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + + Note that this field names the parent container in which the + alerting policies to be listed are stored. To retrieve a + single alerting policy by name, use the + [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy] + operation, instead. + filter (str): + If provided, this field specifies the criteria that must be + met by alert policies to be included in the response. + + For more details, see `sorting and + filtering `__. + order_by (str): + A comma-separated list of fields by which to sort the + result. Supports the same set of field references as the + ``filter`` field. Entries can be prefixed with a minus sign + to sort by the field in descending order. + + For more details, see `sorting and + filtering `__. + page_size (int): + The maximum number of results to return in a + single response. + page_token (str): + If this field is not empty then it must contain the + ``nextPageToken`` value returned by a previous call to this + method. Using this field causes the method to return more + results from the previous method call. 
+ """ + + name = proto.Field(proto.STRING, number=4) + + filter = proto.Field(proto.STRING, number=5) + + order_by = proto.Field(proto.STRING, number=6) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + +class ListAlertPoliciesResponse(proto.Message): + r"""The protocol for the ``ListAlertPolicies`` response. + + Attributes: + alert_policies (Sequence[~.alert.AlertPolicy]): + The returned alert policies. + next_page_token (str): + If there might be more results than were returned, then this + field is set to a non-empty value. To see the additional + results, use that value as ``page_token`` in the next call + to this method. + """ + + @property + def raw_page(self): + return self + + alert_policies = proto.RepeatedField( + proto.MESSAGE, number=3, message=alert.AlertPolicy, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateAlertPolicyRequest(proto.Message): + r"""The protocol for the ``UpdateAlertPolicy`` request. + + Attributes: + update_mask (~.field_mask.FieldMask): + Optional. A list of alerting policy field names. If this + field is not empty, each listed field in the existing + alerting policy is set to the value of the corresponding + field in the supplied policy (``alert_policy``), or to the + field's default value if the field is not in the supplied + alerting policy. Fields not listed retain their previous + value. + + Examples of valid field masks include ``display_name``, + ``documentation``, ``documentation.content``, + ``documentation.mime_type``, ``user_labels``, + ``user_label.nameofkey``, ``enabled``, ``conditions``, + ``combiner``, etc. + + If this field is empty, then the supplied alerting policy + replaces the existing policy. It is the same as deleting the + existing policy and adding the supplied policy, except for + the following: + + - The new policy will have the same ``[ALERT_POLICY_ID]`` + as the former policy. 
This gives you continuity with the + former policy in your notifications and incidents. + - Conditions in the new policy will keep their former + ``[CONDITION_ID]`` if the supplied condition includes the + ``name`` field with that ``[CONDITION_ID]``. If the + supplied condition omits the ``name`` field, then a new + ``[CONDITION_ID]`` is created. + alert_policy (~.alert.AlertPolicy): + Required. The updated alerting policy or the updated values + for the fields listed in ``update_mask``. If ``update_mask`` + is not empty, any fields in this policy that are not in + ``update_mask`` are ignored. + """ + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + alert_policy = proto.Field(proto.MESSAGE, number=3, message=alert.AlertPolicy,) + + +class DeleteAlertPolicyRequest(proto.Message): + r"""The protocol for the ``DeleteAlertPolicy`` request. + + Attributes: + name (str): + Required. The alerting policy to delete. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + + For more information, see + [AlertPolicy][google.monitoring.v3.AlertPolicy]. + """ + + name = proto.Field(proto.STRING, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/monitoring_v3/types/common.py b/google/cloud/monitoring_v3/types/common.py new file mode 100644 index 00000000..a7da61e6 --- /dev/null +++ b/google/cloud/monitoring_v3/types/common.py @@ -0,0 +1,298 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.api import distribution_pb2 as distribution # type: ignore +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.monitoring.v3", + manifest={ + "ComparisonType", + "ServiceTier", + "TypedValue", + "TimeInterval", + "Aggregation", + }, +) + + +class ComparisonType(proto.Enum): + r"""Specifies an ordering relationship on two arguments, called ``left`` + and ``right``. + """ + COMPARISON_UNSPECIFIED = 0 + COMPARISON_GT = 1 + COMPARISON_GE = 2 + COMPARISON_LT = 3 + COMPARISON_LE = 4 + COMPARISON_EQ = 5 + COMPARISON_NE = 6 + + +class ServiceTier(proto.Enum): + r"""The tier of service for a Workspace. Please see the `service tiers + documentation `__ + for more details. + """ + SERVICE_TIER_UNSPECIFIED = 0 + SERVICE_TIER_BASIC = 1 + SERVICE_TIER_PREMIUM = 2 + + +class TypedValue(proto.Message): + r"""A single strongly-typed value. + + Attributes: + bool_value (bool): + A Boolean value: ``true`` or ``false``. + int64_value (int): + A 64-bit integer. Its range is approximately + ±9.2x10^18. + double_value (float): + A 64-bit double-precision floating-point + number. Its magnitude is approximately + ±10^±300 and it has 16 + significant digits of precision. + string_value (str): + A variable-length string value. + distribution_value (~.distribution.Distribution): + A distribution value. 
+ """ + + bool_value = proto.Field(proto.BOOL, number=1, oneof="value") + + int64_value = proto.Field(proto.INT64, number=2, oneof="value") + + double_value = proto.Field(proto.DOUBLE, number=3, oneof="value") + + string_value = proto.Field(proto.STRING, number=4, oneof="value") + + distribution_value = proto.Field( + proto.MESSAGE, number=5, oneof="value", message=distribution.Distribution, + ) + + +class TimeInterval(proto.Message): + r"""A closed time interval. It extends from the start time to the end + time, and includes both: ``[startTime, endTime]``. Valid time + intervals depend on the + ```MetricKind`` `__ + of the metric value. In no case can the end time be earlier than the + start time. + + - For a ``GAUGE`` metric, the ``startTime`` value is technically + optional; if no value is specified, the start time defaults to + the value of the end time, and the interval represents a single + point in time. If both start and end times are specified, they + must be identical. Such an interval is valid only for ``GAUGE`` + metrics, which are point-in-time measurements. + + - For ``DELTA`` and ``CUMULATIVE`` metrics, the start time must be + earlier than the end time. + + - In all cases, the start time of the next interval must be at + least a millisecond after the end time of the previous interval. + Because the interval is closed, if the start time of a new + interval is the same as the end time of the previous interval, + data written at the new start time could overwrite data written + at the previous end time. + + Attributes: + end_time (~.timestamp.Timestamp): + Required. The end of the time interval. + start_time (~.timestamp.Timestamp): + Optional. The beginning of the time interval. + The default value for the start time is the end + time. The start time must not be later than the + end time. 
+ """ + + end_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + start_time = proto.Field(proto.MESSAGE, number=1, message=timestamp.Timestamp,) + + +class Aggregation(proto.Message): + r"""Describes how to combine multiple time series to provide a different + view of the data. Aggregation of time series is done in two steps. + First, each time series in the set is *aligned* to the same time + interval boundaries, then the set of time series is optionally + *reduced* in number. + + Alignment consists of applying the ``per_series_aligner`` operation + to each time series after its data has been divided into regular + ``alignment_period`` time intervals. This process takes *all* of the + data points in an alignment period, applies a mathematical + transformation such as averaging, minimum, maximum, delta, etc., and + converts them into a single data point per period. + + Reduction is when the aligned and transformed time series can + optionally be combined, reducing the number of time series through + similar mathematical transformations. Reduction involves applying a + ``cross_series_reducer`` to all the time series, optionally sorting + the time series into subsets with ``group_by_fields``, and applying + the reducer to each subset. + + The raw time series data can contain a huge amount of information + from multiple sources. Alignment and reduction transforms this mass + of data into a more manageable and representative collection of + data, for example "the 95% latency across the average of all tasks + in a cluster". This representative data can be more easily graphed + and comprehended, and the individual time series data is still + available for later drilldown. For more details, see `Filtering and + aggregation `__. 
+ + Attributes: + alignment_period (~.duration.Duration): + The ``alignment_period`` specifies a time interval, in + seconds, that is used to divide the data in all the [time + series][google.monitoring.v3.TimeSeries] into consistent + blocks of time. This will be done before the per-series + aligner can be applied to the data. + + The value must be at least 60 seconds. If a per-series + aligner other than ``ALIGN_NONE`` is specified, this field + is required or an error is returned. If no per-series + aligner is specified, or the aligner ``ALIGN_NONE`` is + specified, then this field is ignored. + per_series_aligner (~.common.Aggregation.Aligner): + An ``Aligner`` describes how to bring the data points in a + single time series into temporal alignment. Except for + ``ALIGN_NONE``, all alignments cause all the data points in + an ``alignment_period`` to be mathematically grouped + together, resulting in a single data point for each + ``alignment_period`` with end timestamp at the end of the + period. + + Not all alignment operations may be applied to all time + series. The valid choices depend on the ``metric_kind`` and + ``value_type`` of the original time series. Alignment can + change the ``metric_kind`` or the ``value_type`` of the time + series. + + Time series data must be aligned in order to perform + cross-time series reduction. If ``cross_series_reducer`` is + specified, then ``per_series_aligner`` must be specified and + not equal to ``ALIGN_NONE`` and ``alignment_period`` must be + specified; otherwise, an error is returned. + cross_series_reducer (~.common.Aggregation.Reducer): + The reduction operation to be used to combine time series + into a single time series, where the value of each data + point in the resulting series is a function of all the + already aligned values in the input time series. + + Not all reducer operations can be applied to all time + series. 
The valid choices depend on the ``metric_kind`` and + the ``value_type`` of the original time series. Reduction + can yield a time series with a different ``metric_kind`` or + ``value_type`` than the input time series. + + Time series data must first be aligned (see + ``per_series_aligner``) in order to perform cross-time + series reduction. If ``cross_series_reducer`` is specified, + then ``per_series_aligner`` must be specified, and must not + be ``ALIGN_NONE``. An ``alignment_period`` must also be + specified; otherwise, an error is returned. + group_by_fields (Sequence[str]): + The set of fields to preserve when ``cross_series_reducer`` + is specified. The ``group_by_fields`` determine how the time + series are partitioned into subsets prior to applying the + aggregation operation. Each subset contains time series that + have the same value for each of the grouping fields. Each + individual time series is a member of exactly one subset. + The ``cross_series_reducer`` is applied to each subset of + time series. It is not possible to reduce across different + resource types, so this field implicitly contains + ``resource.type``. Fields not specified in + ``group_by_fields`` are aggregated away. If + ``group_by_fields`` is not specified and all the time series + have the same resource type, then the time series are + aggregated into a single output time series. If + ``cross_series_reducer`` is not defined, this field is + ignored. + """ + + class Aligner(proto.Enum): + r"""The ``Aligner`` specifies the operation that will be applied to the + data points in each alignment period in a time series. Except for + ``ALIGN_NONE``, which specifies that no operation be applied, each + alignment operation replaces the set of data values in each + alignment period with a single value: the result of applying the + operation to the data values. An aligned time series has a single + data value at the end of each ``alignment_period``. 
+ + An alignment operation can change the data type of the values, too. + For example, if you apply a counting operation to boolean values, + the data ``value_type`` in the original time series is ``BOOLEAN``, + but the ``value_type`` in the aligned result is ``INT64``. + """ + ALIGN_NONE = 0 + ALIGN_DELTA = 1 + ALIGN_RATE = 2 + ALIGN_INTERPOLATE = 3 + ALIGN_NEXT_OLDER = 4 + ALIGN_MIN = 10 + ALIGN_MAX = 11 + ALIGN_MEAN = 12 + ALIGN_COUNT = 13 + ALIGN_SUM = 14 + ALIGN_STDDEV = 15 + ALIGN_COUNT_TRUE = 16 + ALIGN_COUNT_FALSE = 24 + ALIGN_FRACTION_TRUE = 17 + ALIGN_PERCENTILE_99 = 18 + ALIGN_PERCENTILE_95 = 19 + ALIGN_PERCENTILE_50 = 20 + ALIGN_PERCENTILE_05 = 21 + ALIGN_PERCENT_CHANGE = 23 + + class Reducer(proto.Enum): + r"""A Reducer operation describes how to aggregate data points + from multiple time series into a single time series, where the + value of each data point in the resulting series is a function + of all the already aligned values in the input time series. + """ + REDUCE_NONE = 0 + REDUCE_MEAN = 1 + REDUCE_MIN = 2 + REDUCE_MAX = 3 + REDUCE_SUM = 4 + REDUCE_STDDEV = 5 + REDUCE_COUNT = 6 + REDUCE_COUNT_TRUE = 7 + REDUCE_COUNT_FALSE = 15 + REDUCE_FRACTION_TRUE = 8 + REDUCE_PERCENTILE_99 = 9 + REDUCE_PERCENTILE_95 = 10 + REDUCE_PERCENTILE_50 = 11 + REDUCE_PERCENTILE_05 = 12 + + alignment_period = proto.Field(proto.MESSAGE, number=1, message=duration.Duration,) + + per_series_aligner = proto.Field(proto.ENUM, number=2, enum=Aligner,) + + cross_series_reducer = proto.Field(proto.ENUM, number=4, enum=Reducer,) + + group_by_fields = proto.RepeatedField(proto.STRING, number=5) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/monitoring_v3/types/dropped_labels.py b/google/cloud/monitoring_v3/types/dropped_labels.py new file mode 100644 index 00000000..c1e4b258 --- /dev/null +++ b/google/cloud/monitoring_v3/types/dropped_labels.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.monitoring.v3", manifest={"DroppedLabels",}, +) + + +class DroppedLabels(proto.Message): + r"""A set of (label, value) pairs which were dropped during + aggregation, attached to google.api.Distribution.Exemplars in + google.api.Distribution values during aggregation. + + These values are used in combination with the label values that + remain on the aggregated Distribution timeseries to construct + the full label set for the exemplar values. The resulting full + label set may be used to identify the specific task/job/instance + (for example) which may be contributing to a long-tail, while + allowing the storage savings of only storing aggregated + distribution values for a large group. + + Note that there are no guarantees on ordering of the labels from + exemplar-to-exemplar and from distribution-to-distribution in + the same stream, and there may be duplicates. It is up to + clients to resolve any ambiguities. + + Attributes: + label (Sequence[~.dropped_labels.DroppedLabels.LabelEntry]): + Map from label to its value, for all labels + dropped in any aggregation. 
+ """ + + label = proto.MapField(proto.STRING, proto.STRING, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/monitoring_v3/types/group.py b/google/cloud/monitoring_v3/types/group.py new file mode 100644 index 00000000..24c110ca --- /dev/null +++ b/google/cloud/monitoring_v3/types/group.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module(package="google.monitoring.v3", manifest={"Group",},) + + +class Group(proto.Message): + r"""The description of a dynamic collection of monitored resources. Each + group has a filter that is matched against monitored resources and + their associated metadata. If a group's filter matches an available + monitored resource, then that resource is a member of that group. + Groups can contain any number of monitored resources, and each + monitored resource can be a member of any number of groups. + + Groups can be nested in parent-child hierarchies. The ``parentName`` + field identifies an optional parent for each group. If a group has a + parent, then the only monitored resources available to be matched by + the group's filter are the resources contained in the parent group. + In other words, a group contains the monitored resources that match + its filter and the filters of all the group's ancestors. A group + without a parent can contain any monitored resource. 
+ + For example, consider an infrastructure running a set of instances + with two user-defined tags: ``"environment"`` and ``"role"``. A + parent group has a filter, ``environment="production"``. A child of + that parent group has a filter, ``role="transcoder"``. The parent + group contains all instances in the production environment, + regardless of their roles. The child group contains instances that + have the transcoder role *and* are in the production environment. + + The monitored resources contained in a group can change at any + moment, depending on what resources exist and what filters are + associated with the group and its ancestors. + + Attributes: + name (str): + Output only. The name of this group. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + + When creating a group, this field is ignored and a new name + is created consisting of the project specified in the call + to ``CreateGroup`` and a unique ``[GROUP_ID]`` that is + generated automatically. + display_name (str): + A user-assigned name for this group, used + only for display purposes. + parent_name (str): + The name of the group's parent, if it has one. The format + is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + + For groups with no parent, ``parent_name`` is the empty + string, ``""``. + filter (str): + The filter used to determine which monitored + resources belong to this group. + is_cluster (bool): + If true, the members of this group are + considered to be a cluster. The system can + perform additional analysis on groups that are + clusters. 
+ """ + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + parent_name = proto.Field(proto.STRING, number=3) + + filter = proto.Field(proto.STRING, number=5) + + is_cluster = proto.Field(proto.BOOL, number=6) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/monitoring_v3/types/group_service.py b/google/cloud/monitoring_v3/types/group_service.py new file mode 100644 index 00000000..5ee90f1e --- /dev/null +++ b/google/cloud/monitoring_v3/types/group_service.py @@ -0,0 +1,292 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.monitoring_v3.types import common +from google.cloud.monitoring_v3.types import group as gm_group + + +__protobuf__ = proto.module( + package="google.monitoring.v3", + manifest={ + "ListGroupsRequest", + "ListGroupsResponse", + "GetGroupRequest", + "CreateGroupRequest", + "UpdateGroupRequest", + "DeleteGroupRequest", + "ListGroupMembersRequest", + "ListGroupMembersResponse", + }, +) + + +class ListGroupsRequest(proto.Message): + r"""The ``ListGroup`` request. + + Attributes: + name (str): + Required. The project whose groups are to be listed. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + children_of_group (str): + A group name. 
The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + + Returns groups whose ``parent_name`` field contains the + group name. If no groups have this parent, the results are + empty. + ancestors_of_group (str): + A group name. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + + Returns groups that are ancestors of the specified group. + The groups are returned in order, starting with the + immediate parent and ending with the most distant ancestor. + If the specified group has no immediate parent, the results + are empty. + descendants_of_group (str): + A group name. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + + Returns the descendants of the specified group. This is a + superset of the results returned by the + ``children_of_group`` filter, and includes + children-of-children, and so forth. + page_size (int): + A positive number that is the maximum number + of results to return. + page_token (str): + If this field is not empty then it must contain the + ``next_page_token`` value returned by a previous call to + this method. Using this field causes the method to return + additional results from the previous method call. + """ + + name = proto.Field(proto.STRING, number=7) + + children_of_group = proto.Field(proto.STRING, number=2, oneof="filter") + + ancestors_of_group = proto.Field(proto.STRING, number=3, oneof="filter") + + descendants_of_group = proto.Field(proto.STRING, number=4, oneof="filter") + + page_size = proto.Field(proto.INT32, number=5) + + page_token = proto.Field(proto.STRING, number=6) + + +class ListGroupsResponse(proto.Message): + r"""The ``ListGroups`` response. + + Attributes: + group (Sequence[~.gm_group.Group]): + The groups that match the specified filters. + next_page_token (str): + If there are more results than have been returned, then this + field is set to a non-empty value. 
To see the additional + results, use that value as ``page_token`` in the next call + to this method. + """ + + @property + def raw_page(self): + return self + + group = proto.RepeatedField(proto.MESSAGE, number=1, message=gm_group.Group,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class GetGroupRequest(proto.Message): + r"""The ``GetGroup`` request. + + Attributes: + name (str): + Required. The group to retrieve. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + """ + + name = proto.Field(proto.STRING, number=3) + + +class CreateGroupRequest(proto.Message): + r"""The ``CreateGroup`` request. + + Attributes: + name (str): + Required. The project in which to create the group. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + group (~.gm_group.Group): + Required. A group definition. It is an error to define the + ``name`` field because the system assigns the name. + validate_only (bool): + If true, validate this request but do not + create the group. + """ + + name = proto.Field(proto.STRING, number=4) + + group = proto.Field(proto.MESSAGE, number=2, message=gm_group.Group,) + + validate_only = proto.Field(proto.BOOL, number=3) + + +class UpdateGroupRequest(proto.Message): + r"""The ``UpdateGroup`` request. + + Attributes: + group (~.gm_group.Group): + Required. The new definition of the group. All fields of the + existing group, excepting ``name``, are replaced with the + corresponding fields of this group. + validate_only (bool): + If true, validate this request but do not + update the existing group. + """ + + group = proto.Field(proto.MESSAGE, number=2, message=gm_group.Group,) + + validate_only = proto.Field(proto.BOOL, number=3) + + +class DeleteGroupRequest(proto.Message): + r"""The ``DeleteGroup`` request. The default behavior is to be able to + delete a single group without any descendants. + + Attributes: + name (str): + Required. The group to delete. 
The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + recursive (bool): + If this field is true, then the request means + to delete a group with all its descendants. + Otherwise, the request means to delete a group + only when it has no descendants. The default + value is false. + """ + + name = proto.Field(proto.STRING, number=3) + + recursive = proto.Field(proto.BOOL, number=4) + + +class ListGroupMembersRequest(proto.Message): + r"""The ``ListGroupMembers`` request. + + Attributes: + name (str): + Required. The group whose members are listed. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + page_size (int): + A positive number that is the maximum number + of results to return. + page_token (str): + If this field is not empty then it must contain the + ``next_page_token`` value returned by a previous call to + this method. Using this field causes the method to return + additional results from the previous method call. + filter (str): + An optional `list + filter `__ + describing the members to be returned. The filter may + reference the type, labels, and metadata of monitored + resources that comprise the group. For example, to return + only resources representing Compute Engine VM instances, use + this filter: + + :: + + `resource.type = "gce_instance"` + interval (~.common.TimeInterval): + An optional time interval for which results + should be returned. Only members that were part + of the group during the specified interval are + included in the response. If no interval is + provided then the group membership over the last + minute is returned. 
+ """ + + name = proto.Field(proto.STRING, number=7) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + filter = proto.Field(proto.STRING, number=5) + + interval = proto.Field(proto.MESSAGE, number=6, message=common.TimeInterval,) + + +class ListGroupMembersResponse(proto.Message): + r"""The ``ListGroupMembers`` response. + + Attributes: + members (Sequence[~.monitored_resource.MonitoredResource]): + A set of monitored resources in the group. + next_page_token (str): + If there are more results than have been returned, then this + field is set to a non-empty value. To see the additional + results, use that value as ``page_token`` in the next call + to this method. + total_size (int): + The total number of elements matching this + request. + """ + + @property + def raw_page(self): + return self + + members = proto.RepeatedField( + proto.MESSAGE, number=1, message=monitored_resource.MonitoredResource, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + total_size = proto.Field(proto.INT32, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/monitoring_v3/types/metric.py b/google/cloud/monitoring_v3/types/metric.py new file mode 100644 index 00000000..ee7a133e --- /dev/null +++ b/google/cloud/monitoring_v3/types/metric.py @@ -0,0 +1,334 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.api import label_pb2 as label # type: ignore +from google.api import metric_pb2 as ga_metric # type: ignore +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.monitoring_v3.types import common + + +__protobuf__ = proto.module( + package="google.monitoring.v3", + manifest={ + "Point", + "TimeSeries", + "TimeSeriesDescriptor", + "TimeSeriesData", + "LabelValue", + "QueryError", + "TextLocator", + }, +) + + +class Point(proto.Message): + r"""A single data point in a time series. + + Attributes: + interval (~.common.TimeInterval): + The time interval to which the data point applies. For + ``GAUGE`` metrics, the start time is optional, but if it is + supplied, it must equal the end time. For ``DELTA`` metrics, + the start and end time should specify a non-zero interval, + with subsequent points specifying contiguous and + non-overlapping intervals. For ``CUMULATIVE`` metrics, the + start and end time should specify a non-zero interval, with + subsequent points specifying the same start time and + increasing end times, until an event resets the cumulative + value to zero and sets a new start time for the following + points. + value (~.common.TypedValue): + The value of the data point. + """ + + interval = proto.Field(proto.MESSAGE, number=1, message=common.TimeInterval,) + + value = proto.Field(proto.MESSAGE, number=2, message=common.TypedValue,) + + +class TimeSeries(proto.Message): + r"""A collection of data points that describes the time-varying + values of a metric. A time series is identified by a combination + of a fully-specified monitored resource and a fully-specified + metric. This type is used for both listing and creating time + series. + + Attributes: + metric (~.ga_metric.Metric): + The associated metric. A fully-specified + metric used to identify the time series. + resource (~.monitored_resource.MonitoredResource): + The associated monitored resource. 
Custom + metrics can use only certain monitored resource + types in their time series data. + metadata (~.monitored_resource.MonitoredResourceMetadata): + Output only. The associated monitored + resource metadata. When reading a timeseries, + this field will include metadata labels that are + explicitly named in the reduction. When creating + a timeseries, this field is ignored. + metric_kind (~.ga_metric.MetricDescriptor.MetricKind): + The metric kind of the time series. When listing time + series, this metric kind might be different from the metric + kind of the associated metric if this time series is an + alignment or reduction of other time series. + + When creating a time series, this field is optional. If + present, it must be the same as the metric kind of the + associated metric. If the associated metric's descriptor + must be auto-created, then this field specifies the metric + kind of the new descriptor and must be either ``GAUGE`` (the + default) or ``CUMULATIVE``. + value_type (~.ga_metric.MetricDescriptor.ValueType): + The value type of the time series. When listing time series, + this value type might be different from the value type of + the associated metric if this time series is an alignment or + reduction of other time series. + + When creating a time series, this field is optional. If + present, it must be the same as the type of the data in the + ``points`` field. + points (Sequence[~.gm_metric.Point]): + The data points of this time series. When listing time + series, points are returned in reverse time order. + + When creating a time series, this field must contain exactly + one point and the point's type must be the same as the value + type of the associated metric. If the associated metric's + descriptor must be auto-created, then the value type of the + descriptor is determined by the point's type, which must be + ``BOOL``, ``INT64``, ``DOUBLE``, or ``DISTRIBUTION``. 
+ """ + + metric = proto.Field(proto.MESSAGE, number=1, message=ga_metric.Metric,) + + resource = proto.Field( + proto.MESSAGE, number=2, message=monitored_resource.MonitoredResource, + ) + + metadata = proto.Field( + proto.MESSAGE, number=7, message=monitored_resource.MonitoredResourceMetadata, + ) + + metric_kind = proto.Field( + proto.ENUM, number=3, enum=ga_metric.MetricDescriptor.MetricKind, + ) + + value_type = proto.Field( + proto.ENUM, number=4, enum=ga_metric.MetricDescriptor.ValueType, + ) + + points = proto.RepeatedField(proto.MESSAGE, number=5, message=Point,) + + +class TimeSeriesDescriptor(proto.Message): + r"""A descriptor for the labels and points in a timeseries. + + Attributes: + label_descriptors (Sequence[~.label.LabelDescriptor]): + Descriptors for the labels. + point_descriptors (Sequence[~.gm_metric.TimeSeriesDescriptor.ValueDescriptor]): + Descriptors for the point data value columns. + """ + + class ValueDescriptor(proto.Message): + r"""A descriptor for the value columns in a data point. + + Attributes: + key (str): + The value key. + value_type (~.ga_metric.MetricDescriptor.ValueType): + The value type. + metric_kind (~.ga_metric.MetricDescriptor.MetricKind): + The value stream kind. + """ + + key = proto.Field(proto.STRING, number=1) + + value_type = proto.Field( + proto.ENUM, number=2, enum=ga_metric.MetricDescriptor.ValueType, + ) + + metric_kind = proto.Field( + proto.ENUM, number=3, enum=ga_metric.MetricDescriptor.MetricKind, + ) + + label_descriptors = proto.RepeatedField( + proto.MESSAGE, number=1, message=label.LabelDescriptor, + ) + + point_descriptors = proto.RepeatedField( + proto.MESSAGE, number=5, message=ValueDescriptor, + ) + + +class TimeSeriesData(proto.Message): + r"""Represents the values of a time series associated with a + TimeSeriesDescriptor. 
+ + Attributes: + label_values (Sequence[~.gm_metric.LabelValue]): + The values of the labels in the time series identifier, + given in the same order as the ``label_descriptors`` field + of the TimeSeriesDescriptor associated with this object. + Each value must have a value of the type given in the + corresponding entry of ``label_descriptors``. + point_data (Sequence[~.gm_metric.TimeSeriesData.PointData]): + The points in the time series. + """ + + class PointData(proto.Message): + r"""A point's value columns and time interval. Each point has one or + more point values corresponding to the entries in + ``point_descriptors`` field in the TimeSeriesDescriptor associated + with this object. + + Attributes: + values (Sequence[~.common.TypedValue]): + The values that make up the point. + time_interval (~.common.TimeInterval): + The time interval associated with the point. + """ + + values = proto.RepeatedField( + proto.MESSAGE, number=1, message=common.TypedValue, + ) + + time_interval = proto.Field( + proto.MESSAGE, number=2, message=common.TimeInterval, + ) + + label_values = proto.RepeatedField(proto.MESSAGE, number=1, message="LabelValue",) + + point_data = proto.RepeatedField(proto.MESSAGE, number=2, message=PointData,) + + +class LabelValue(proto.Message): + r"""A label value. + + Attributes: + bool_value (bool): + A bool label value. + int64_value (int): + An int64 label value. + string_value (str): + A string label value. + """ + + bool_value = proto.Field(proto.BOOL, number=1, oneof="value") + + int64_value = proto.Field(proto.INT64, number=2, oneof="value") + + string_value = proto.Field(proto.STRING, number=3, oneof="value") + + +class QueryError(proto.Message): + r"""An error associated with a query in the time series query + language format. + + Attributes: + locator (~.gm_metric.TextLocator): + The location of the time series query + language text that this error applies to. + message (str): + The error message. 
+ """ + + locator = proto.Field(proto.MESSAGE, number=1, message="TextLocator",) + + message = proto.Field(proto.STRING, number=2) + + +class TextLocator(proto.Message): + r"""A locator for text. Indicates a particular part of the text of a + request or of an object referenced in the request. + + For example, suppose the request field ``text`` contains: + + text: "The quick brown fox jumps over the lazy dog." + + Then the locator: + + source: "text" start_position { line: 1 column: 17 } end_position { + line: 1 column: 19 } + + refers to the part of the text: "fox". + + Attributes: + source (str): + The source of the text. The source may be a field in the + request, in which case its format is the format of the + google.rpc.BadRequest.FieldViolation.field field in + https://cloud.google.com/apis/design/errors#error_details. + It may also be a source other than the request field + (e.g. a macro definition referenced in the text of the + query), in which case this is the name of the source (e.g. + the macro name). + start_position (~.gm_metric.TextLocator.Position): + The position of the first byte within the + text. + end_position (~.gm_metric.TextLocator.Position): + The position of the last byte within the + text. + nested_locator (~.gm_metric.TextLocator): + If ``source``, ``start_position``, and ``end_position`` + describe a call on some object (e.g. a macro in the time + series query language text) and a location is to be + designated in that object's text, ``nested_locator`` + identifies the location within that object. + nesting_reason (str): + When ``nested_locator`` is set, this field gives the reason + for the nesting. Usually, the reason is a macro invocation. + In that case, the macro name (including the leading '@') + signals the location of the macro call in the text and a + macro argument name (including the leading '$') signals the + location of the macro argument inside the macro body that + got substituted away. 
+ """ + + class Position(proto.Message): + r"""The position of a byte within the text. + + Attributes: + line (int): + The line, starting with 1, where the byte is + positioned. + column (int): + The column within the line, starting with 1, + where the byte is positioned. This is a byte + index even though the text is UTF-8. + """ + + line = proto.Field(proto.INT32, number=1) + + column = proto.Field(proto.INT32, number=2) + + source = proto.Field(proto.STRING, number=1) + + start_position = proto.Field(proto.MESSAGE, number=2, message=Position,) + + end_position = proto.Field(proto.MESSAGE, number=3, message=Position,) + + nested_locator = proto.Field(proto.MESSAGE, number=4, message="TextLocator",) + + nesting_reason = proto.Field(proto.STRING, number=5) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/monitoring_v3/types/metric_service.py b/google/cloud/monitoring_v3/types/metric_service.py new file mode 100644 index 00000000..f2d05b03 --- /dev/null +++ b/google/cloud/monitoring_v3/types/metric_service.py @@ -0,0 +1,549 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.api import metric_pb2 as ga_metric # type: ignore +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.monitoring_v3.types import common +from google.cloud.monitoring_v3.types import metric as gm_metric +from google.rpc import status_pb2 as gr_status # type: ignore + + +__protobuf__ = proto.module( + package="google.monitoring.v3", + manifest={ + "ListMonitoredResourceDescriptorsRequest", + "ListMonitoredResourceDescriptorsResponse", + "GetMonitoredResourceDescriptorRequest", + "ListMetricDescriptorsRequest", + "ListMetricDescriptorsResponse", + "GetMetricDescriptorRequest", + "CreateMetricDescriptorRequest", + "DeleteMetricDescriptorRequest", + "ListTimeSeriesRequest", + "ListTimeSeriesResponse", + "CreateTimeSeriesRequest", + "CreateTimeSeriesError", + "CreateTimeSeriesSummary", + "QueryTimeSeriesRequest", + "QueryTimeSeriesResponse", + "QueryErrorList", + }, +) + + +class ListMonitoredResourceDescriptorsRequest(proto.Message): + r"""The ``ListMonitoredResourceDescriptors`` request. + + Attributes: + name (str): + Required. The project on which to execute the request. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + filter (str): + An optional + `filter `__ + describing the descriptors to be returned. The filter can + reference the descriptor's type and labels. For example, the + following filter returns only Google Compute Engine + descriptors that have an ``id`` label: + + :: + + resource.type = starts_with("gce_") AND resource.label:id + page_size (int): + A positive number that is the maximum number + of results to return. + page_token (str): + If this field is not empty then it must contain the + ``nextPageToken`` value returned by a previous call to this + method. Using this field causes the method to return + additional results from the previous method call. 
+ """ + + name = proto.Field(proto.STRING, number=5) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + +class ListMonitoredResourceDescriptorsResponse(proto.Message): + r"""The ``ListMonitoredResourceDescriptors`` response. + + Attributes: + resource_descriptors (Sequence[~.monitored_resource.MonitoredResourceDescriptor]): + The monitored resource descriptors that are available to + this project and that match ``filter``, if present. + next_page_token (str): + If there are more results than have been returned, then this + field is set to a non-empty value. To see the additional + results, use that value as ``page_token`` in the next call + to this method. + """ + + @property + def raw_page(self): + return self + + resource_descriptors = proto.RepeatedField( + proto.MESSAGE, number=1, message=monitored_resource.MonitoredResourceDescriptor, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class GetMonitoredResourceDescriptorRequest(proto.Message): + r"""The ``GetMonitoredResourceDescriptor`` request. + + Attributes: + name (str): + Required. The monitored resource descriptor to get. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE] + + The ``[RESOURCE_TYPE]`` is a predefined type, such as + ``cloudsql_database``. + """ + + name = proto.Field(proto.STRING, number=3) + + +class ListMetricDescriptorsRequest(proto.Message): + r"""The ``ListMetricDescriptors`` request. + + Attributes: + name (str): + Required. The project on which to execute the request. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + filter (str): + If this field is empty, all custom and system-defined metric + descriptors are returned. Otherwise, the + `filter `__ + specifies which metric descriptors are to be returned. 
For + example, the following filter matches all `custom + metrics `__: + + :: + + metric.type = starts_with("custom.googleapis.com/") + page_size (int): + A positive number that is the maximum number + of results to return. + page_token (str): + If this field is not empty then it must contain the + ``nextPageToken`` value returned by a previous call to this + method. Using this field causes the method to return + additional results from the previous method call. + """ + + name = proto.Field(proto.STRING, number=5) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + +class ListMetricDescriptorsResponse(proto.Message): + r"""The ``ListMetricDescriptors`` response. + + Attributes: + metric_descriptors (Sequence[~.ga_metric.MetricDescriptor]): + The metric descriptors that are available to the project and + that match the value of ``filter``, if present. + next_page_token (str): + If there are more results than have been returned, then this + field is set to a non-empty value. To see the additional + results, use that value as ``page_token`` in the next call + to this method. + """ + + @property + def raw_page(self): + return self + + metric_descriptors = proto.RepeatedField( + proto.MESSAGE, number=1, message=ga_metric.MetricDescriptor, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class GetMetricDescriptorRequest(proto.Message): + r"""The ``GetMetricDescriptor`` request. + + Attributes: + name (str): + Required. The metric descriptor on which to execute the + request. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] + + An example value of ``[METRIC_ID]`` is + ``"compute.googleapis.com/instance/disk/read_bytes_count"``. + """ + + name = proto.Field(proto.STRING, number=3) + + +class CreateMetricDescriptorRequest(proto.Message): + r"""The ``CreateMetricDescriptor`` request. 
+ + Attributes: + name (str): + Required. The project on which to execute the request. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + metric_descriptor (~.ga_metric.MetricDescriptor): + Required. The new `custom + metric `__ + descriptor. + """ + + name = proto.Field(proto.STRING, number=3) + + metric_descriptor = proto.Field( + proto.MESSAGE, number=2, message=ga_metric.MetricDescriptor, + ) + + +class DeleteMetricDescriptorRequest(proto.Message): + r"""The ``DeleteMetricDescriptor`` request. + + Attributes: + name (str): + Required. The metric descriptor on which to execute the + request. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] + + An example of ``[METRIC_ID]`` is: + ``"custom.googleapis.com/my_test_metric"``. + """ + + name = proto.Field(proto.STRING, number=3) + + +class ListTimeSeriesRequest(proto.Message): + r"""The ``ListTimeSeries`` request. + + Attributes: + name (str): + Required. The project on which to execute the request. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + filter (str): + Required. A `monitoring + filter `__ + that specifies which time series should be returned. The + filter must specify a single metric type, and can + additionally specify metric labels and other information. + For example: + + :: + + metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND + metric.labels.instance_name = "my-instance-name". + interval (~.common.TimeInterval): + Required. The time interval for which results + should be returned. Only time series that + contain data points in the specified interval + are included in the response. + aggregation (~.common.Aggregation): + Specifies the alignment of data points in individual time + series as well as how to combine the retrieved time series + across specified labels. + + By default (if no ``aggregation`` is explicitly specified), + the raw time series data is returned. + order_by (str): + Unsupported: must be left blank. 
The points + in each time series are currently returned in + reverse time order (most recent to oldest). + view (~.metric_service.ListTimeSeriesRequest.TimeSeriesView): + Required. Specifies which information is + returned about the time series. + page_size (int): + A positive number that is the maximum number of results to + return. If ``page_size`` is empty or more than 100,000 + results, the effective ``page_size`` is 100,000 results. If + ``view`` is set to ``FULL``, this is the maximum number of + ``Points`` returned. If ``view`` is set to ``HEADERS``, this + is the maximum number of ``TimeSeries`` returned. + page_token (str): + If this field is not empty then it must contain the + ``nextPageToken`` value returned by a previous call to this + method. Using this field causes the method to return + additional results from the previous method call. + """ + + class TimeSeriesView(proto.Enum): + r"""Controls which fields are returned by ``ListTimeSeries``.""" + FULL = 0 + HEADERS = 1 + + name = proto.Field(proto.STRING, number=10) + + filter = proto.Field(proto.STRING, number=2) + + interval = proto.Field(proto.MESSAGE, number=4, message=common.TimeInterval,) + + aggregation = proto.Field(proto.MESSAGE, number=5, message=common.Aggregation,) + + order_by = proto.Field(proto.STRING, number=6) + + view = proto.Field(proto.ENUM, number=7, enum=TimeSeriesView,) + + page_size = proto.Field(proto.INT32, number=8) + + page_token = proto.Field(proto.STRING, number=9) + + +class ListTimeSeriesResponse(proto.Message): + r"""The ``ListTimeSeries`` response. + + Attributes: + time_series (Sequence[~.gm_metric.TimeSeries]): + One or more time series that match the filter + included in the request. + next_page_token (str): + If there are more results than have been returned, then this + field is set to a non-empty value. To see the additional + results, use that value as ``page_token`` in the next call + to this method. 
+ execution_errors (Sequence[~.gr_status.Status]): + Query execution errors that may have caused + the time series data returned to be incomplete. + """ + + @property + def raw_page(self): + return self + + time_series = proto.RepeatedField( + proto.MESSAGE, number=1, message=gm_metric.TimeSeries, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + execution_errors = proto.RepeatedField( + proto.MESSAGE, number=3, message=gr_status.Status, + ) + + +class CreateTimeSeriesRequest(proto.Message): + r"""The ``CreateTimeSeries`` request. + + Attributes: + name (str): + Required. The project on which to execute the request. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + time_series (Sequence[~.gm_metric.TimeSeries]): + Required. The new data to be added to a list of time series. + Adds at most one data point to each of several time series. + The new data point must be more recent than any other point + in its time series. Each ``TimeSeries`` value must fully + specify a unique time series by supplying all label values + for the metric and the monitored resource. + + The maximum number of ``TimeSeries`` objects per ``Create`` + request is 200. + """ + + name = proto.Field(proto.STRING, number=3) + + time_series = proto.RepeatedField( + proto.MESSAGE, number=2, message=gm_metric.TimeSeries, + ) + + +class CreateTimeSeriesError(proto.Message): + r"""DEPRECATED. Used to hold per-time-series error status. + + Attributes: + time_series (~.gm_metric.TimeSeries): + DEPRECATED. Time series ID that resulted in the ``status`` + error. + status (~.gr_status.Status): + DEPRECATED. The status of the requested write operation for + ``time_series``. + """ + + time_series = proto.Field(proto.MESSAGE, number=1, message=gm_metric.TimeSeries,) + + status = proto.Field(proto.MESSAGE, number=2, message=gr_status.Status,) + + +class CreateTimeSeriesSummary(proto.Message): + r"""Summary of the result of a failed request to write data to a + time series. 
+ + Attributes: + total_point_count (int): + The number of points in the request. + success_point_count (int): + The number of points that were successfully + written. + errors (Sequence[~.metric_service.CreateTimeSeriesSummary.Error]): + The number of points that failed to be + written. Order is not guaranteed. + """ + + class Error(proto.Message): + r"""Detailed information about an error category. + + Attributes: + status (~.gr_status.Status): + The status of the requested write operation. + point_count (int): + The number of points that couldn't be written because of + ``status``. + """ + + status = proto.Field(proto.MESSAGE, number=1, message=gr_status.Status,) + + point_count = proto.Field(proto.INT32, number=2) + + total_point_count = proto.Field(proto.INT32, number=1) + + success_point_count = proto.Field(proto.INT32, number=2) + + errors = proto.RepeatedField(proto.MESSAGE, number=3, message=Error,) + + +class QueryTimeSeriesRequest(proto.Message): + r"""The ``QueryTimeSeries`` request. + + Attributes: + name (str): + Required. The project on which to execute the request. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + query (str): + Required. The query in the monitoring query + language format. The default time zone is in + UTC. + page_size (int): + A positive number that is the maximum number of + time_series_data to return. + page_token (str): + If this field is not empty then it must contain the + ``nextPageToken`` value returned by a previous call to this + method. Using this field causes the method to return + additional results from the previous method call. + """ + + name = proto.Field(proto.STRING, number=1) + + query = proto.Field(proto.STRING, number=7) + + page_size = proto.Field(proto.INT32, number=9) + + page_token = proto.Field(proto.STRING, number=10) + + +class QueryTimeSeriesResponse(proto.Message): + r"""The ``QueryTimeSeries`` response. 
+ + Attributes: + time_series_descriptor (~.gm_metric.TimeSeriesDescriptor): + The descriptor for the time series data. + time_series_data (Sequence[~.gm_metric.TimeSeriesData]): + The time series data. + next_page_token (str): + If there are more results than have been returned, then this + field is set to a non-empty value. To see the additional + results, use that value as ``page_token`` in the next call + to this method. + partial_errors (Sequence[~.gr_status.Status]): + Query execution errors that may have caused + the time series data returned to be incomplete. + The available data will be available in the + response. + """ + + @property + def raw_page(self): + return self + + time_series_descriptor = proto.Field( + proto.MESSAGE, number=8, message=gm_metric.TimeSeriesDescriptor, + ) + + time_series_data = proto.RepeatedField( + proto.MESSAGE, number=9, message=gm_metric.TimeSeriesData, + ) + + next_page_token = proto.Field(proto.STRING, number=10) + + partial_errors = proto.RepeatedField( + proto.MESSAGE, number=11, message=gr_status.Status, + ) + + +class QueryErrorList(proto.Message): + r"""This is an error detail intended to be used with INVALID_ARGUMENT + errors. + + Attributes: + errors (Sequence[~.gm_metric.QueryError]): + Errors in parsing the time series query + language text. The number of errors in the + response may be limited. + error_summary (str): + A summary of all the errors. 
+ """ + + errors = proto.RepeatedField(proto.MESSAGE, number=1, message=gm_metric.QueryError,) + + error_summary = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/monitoring_v3/types/mutation_record.py b/google/cloud/monitoring_v3/types/mutation_record.py new file mode 100644 index 00000000..0c3140e8 --- /dev/null +++ b/google/cloud/monitoring_v3/types/mutation_record.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.monitoring.v3", manifest={"MutationRecord",}, +) + + +class MutationRecord(proto.Message): + r"""Describes a change made to a configuration. + + Attributes: + mutate_time (~.timestamp.Timestamp): + When the change occurred. + mutated_by (str): + The email address of the user making the + change. 
+ """ + + mutate_time = proto.Field(proto.MESSAGE, number=1, message=timestamp.Timestamp,) + + mutated_by = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/monitoring_v3/types/notification.py b/google/cloud/monitoring_v3/types/notification.py new file mode 100644 index 00000000..02c0663c --- /dev/null +++ b/google/cloud/monitoring_v3/types/notification.py @@ -0,0 +1,215 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.api import label_pb2 as label # type: ignore +from google.api import launch_stage_pb2 as ga_launch_stage # type: ignore +from google.cloud.monitoring_v3.types import common +from google.protobuf import wrappers_pb2 as wrappers # type: ignore + + +__protobuf__ = proto.module( + package="google.monitoring.v3", + manifest={"NotificationChannelDescriptor", "NotificationChannel",}, +) + + +class NotificationChannelDescriptor(proto.Message): + r"""A description of a notification channel. The descriptor + includes the properties of the channel and the set of labels or + fields that must be specified to configure channels of a given + type. + + Attributes: + name (str): + The full REST resource name for this descriptor. The format + is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[TYPE] + + In the above, ``[TYPE]`` is the value of the ``type`` field. 
+ type_ (str): + The type of notification channel, such as + "email", "sms", etc. Notification channel types + are globally unique. + display_name (str): + A human-readable name for the notification + channel type. This form of the name is suitable + for a user interface. + description (str): + A human-readable description of the + notification channel type. The description may + include a description of the properties of the + channel and pointers to external documentation. + labels (Sequence[~.label.LabelDescriptor]): + The set of labels that must be defined to + identify a particular channel of the + corresponding type. Each label includes a + description for how that field should be + populated. + supported_tiers (Sequence[~.common.ServiceTier]): + The tiers that support this notification channel; the + project service tier must be one of the supported_tiers. + launch_stage (~.ga_launch_stage.LaunchStage): + The product launch stage for channels of this + type. + """ + + name = proto.Field(proto.STRING, number=6) + + type_ = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + description = proto.Field(proto.STRING, number=3) + + labels = proto.RepeatedField( + proto.MESSAGE, number=4, message=label.LabelDescriptor, + ) + + supported_tiers = proto.RepeatedField( + proto.ENUM, number=5, enum=common.ServiceTier, + ) + + launch_stage = proto.Field(proto.ENUM, number=7, enum=ga_launch_stage.LaunchStage,) + + +class NotificationChannel(proto.Message): + r"""A ``NotificationChannel`` is a medium through which an alert is + delivered when a policy violation is detected. Examples of channels + include email, SMS, and third-party messaging applications. Fields + containing sensitive information like authentication tokens or + contact info are only partially populated on retrieval. + + Attributes: + type_ (str): + The type of the notification channel. 
This field matches the + value of the + [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type] + field. + name (str): + The full REST resource name for this channel. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + + The ``[CHANNEL_ID]`` is automatically assigned by the server + on creation. + display_name (str): + An optional human-readable name for this + notification channel. It is recommended that you + specify a non-empty and unique name in order to + make it easier to identify the channels in your + project, though this is not enforced. The + display name is limited to 512 Unicode + characters. + description (str): + An optional human-readable description of + this notification channel. This description may + provide additional details, beyond the display + name, for the channel. This may not exceed 1024 + Unicode characters. + labels (Sequence[~.notification.NotificationChannel.LabelsEntry]): + Configuration fields that define the channel and its + behavior. The permissible and required labels are specified + in the + [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels] + of the ``NotificationChannelDescriptor`` corresponding to + the ``type`` field. + user_labels (Sequence[~.notification.NotificationChannel.UserLabelsEntry]): + User-supplied key/value data that does not need to conform + to the corresponding ``NotificationChannelDescriptor``'s + schema, unlike the ``labels`` field. This field is intended + to be used for organizing and identifying the + ``NotificationChannel`` objects. + + The field can contain up to 64 entries. Each key and value + is limited to 63 Unicode characters or 128 bytes, whichever + is smaller. Labels and values can contain only lowercase + letters, numerals, underscores, and dashes. Keys must begin + with a letter. 
+ verification_status (~.notification.NotificationChannel.VerificationStatus): + Indicates whether this channel has been verified or not. On + a + [``ListNotificationChannels``][google.monitoring.v3.NotificationChannelService.ListNotificationChannels] + or + [``GetNotificationChannel``][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] + operation, this field is expected to be populated. + + If the value is ``UNVERIFIED``, then it indicates that the + channel is non-functioning (it both requires verification + and lacks verification); otherwise, it is assumed that the + channel works. + + If the channel is neither ``VERIFIED`` nor ``UNVERIFIED``, + it implies that the channel is of a type that does not + require verification or that this specific channel has been + exempted from verification because it was created prior to + verification being required for channels of this type. + + This field cannot be modified using a standard + [``UpdateNotificationChannel``][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] + operation. To change the value of this field, you must call + [``VerifyNotificationChannel``][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel]. + enabled (~.wrappers.BoolValue): + Whether notifications are forwarded to the + described channel. This makes it possible to + disable delivery of notifications to a + particular channel without removing the channel + from all alerting policies that reference the + channel. This is a more convenient approach when + the change is temporary and you want to receive + notifications from the same set of alerting + policies on the channel at some point in the + future. + """ + + class VerificationStatus(proto.Enum): + r"""Indicates whether the channel has been verified or not. 
It is + illegal to specify this field in a + [``CreateNotificationChannel``][google.monitoring.v3.NotificationChannelService.CreateNotificationChannel] + or an + [``UpdateNotificationChannel``][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] + operation. + """ + VERIFICATION_STATUS_UNSPECIFIED = 0 + UNVERIFIED = 1 + VERIFIED = 2 + + type_ = proto.Field(proto.STRING, number=1) + + name = proto.Field(proto.STRING, number=6) + + display_name = proto.Field(proto.STRING, number=3) + + description = proto.Field(proto.STRING, number=4) + + labels = proto.MapField(proto.STRING, proto.STRING, number=5) + + user_labels = proto.MapField(proto.STRING, proto.STRING, number=8) + + verification_status = proto.Field(proto.ENUM, number=9, enum=VerificationStatus,) + + enabled = proto.Field(proto.MESSAGE, number=11, message=wrappers.BoolValue,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/monitoring_v3/types/notification_service.py b/google/cloud/monitoring_v3/types/notification_service.py new file mode 100644 index 00000000..6e73dd6d --- /dev/null +++ b/google/cloud/monitoring_v3/types/notification_service.py @@ -0,0 +1,383 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.cloud.monitoring_v3.types import notification +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.monitoring.v3", + manifest={ + "ListNotificationChannelDescriptorsRequest", + "ListNotificationChannelDescriptorsResponse", + "GetNotificationChannelDescriptorRequest", + "CreateNotificationChannelRequest", + "ListNotificationChannelsRequest", + "ListNotificationChannelsResponse", + "GetNotificationChannelRequest", + "UpdateNotificationChannelRequest", + "DeleteNotificationChannelRequest", + "SendNotificationChannelVerificationCodeRequest", + "GetNotificationChannelVerificationCodeRequest", + "GetNotificationChannelVerificationCodeResponse", + "VerifyNotificationChannelRequest", + }, +) + + +class ListNotificationChannelDescriptorsRequest(proto.Message): + r"""The ``ListNotificationChannelDescriptors`` request. + + Attributes: + name (str): + Required. The REST resource name of the parent from which to + retrieve the notification channel descriptors. The expected + syntax is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + + Note that this names the parent container in which to look + for the descriptors; to retrieve a single descriptor by + name, use the + [GetNotificationChannelDescriptor][google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor] + operation, instead. + page_size (int): + The maximum number of results to return in a + single response. If not set to a positive + number, a reasonable value will be chosen by the + service. + page_token (str): + If non-empty, ``page_token`` must contain a value returned + as the ``next_page_token`` in a previous response to request + the next set of results. 
+ """ + + name = proto.Field(proto.STRING, number=4) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + +class ListNotificationChannelDescriptorsResponse(proto.Message): + r"""The ``ListNotificationChannelDescriptors`` response. + + Attributes: + channel_descriptors (Sequence[~.notification.NotificationChannelDescriptor]): + The monitored resource descriptors supported + for the specified project, optionally filtered. + next_page_token (str): + If not empty, indicates that there may be more results that + match the request. Use the value in the ``page_token`` field + in a subsequent request to fetch the next set of results. If + empty, all results have been returned. + """ + + @property + def raw_page(self): + return self + + channel_descriptors = proto.RepeatedField( + proto.MESSAGE, number=1, message=notification.NotificationChannelDescriptor, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class GetNotificationChannelDescriptorRequest(proto.Message): + r"""The ``GetNotificationChannelDescriptor`` response. + + Attributes: + name (str): + Required. The channel type for which to execute the request. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE] + """ + + name = proto.Field(proto.STRING, number=3) + + +class CreateNotificationChannelRequest(proto.Message): + r"""The ``CreateNotificationChannel`` request. + + Attributes: + name (str): + Required. The project on which to execute the request. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + + This names the container into which the channel will be + written, this does not name the newly created channel. The + resulting channel's name will have a normalized version of + this field as a prefix, but will add + ``/notificationChannels/[CHANNEL_ID]`` to identify the + channel. + notification_channel (~.notification.NotificationChannel): + Required. 
The definition of the ``NotificationChannel`` to + create. + """ + + name = proto.Field(proto.STRING, number=3) + + notification_channel = proto.Field( + proto.MESSAGE, number=2, message=notification.NotificationChannel, + ) + + +class ListNotificationChannelsRequest(proto.Message): + r"""The ``ListNotificationChannels`` request. + + Attributes: + name (str): + Required. The project on which to execute the request. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + + This names the container in which to look for the + notification channels; it does not name a specific channel. + To query a specific channel by REST resource name, use the + [``GetNotificationChannel``][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] + operation. + filter (str): + If provided, this field specifies the criteria that must be + met by notification channels to be included in the response. + + For more details, see `sorting and + filtering `__. + order_by (str): + A comma-separated list of fields by which to sort the + result. Supports the same set of fields as in ``filter``. + Entries can be prefixed with a minus sign to sort in + descending rather than ascending order. + + For more details, see `sorting and + filtering `__. + page_size (int): + The maximum number of results to return in a + single response. If not set to a positive + number, a reasonable value will be chosen by the + service. + page_token (str): + If non-empty, ``page_token`` must contain a value returned + as the ``next_page_token`` in a previous response to request + the next set of results. + """ + + name = proto.Field(proto.STRING, number=5) + + filter = proto.Field(proto.STRING, number=6) + + order_by = proto.Field(proto.STRING, number=7) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + +class ListNotificationChannelsResponse(proto.Message): + r"""The ``ListNotificationChannels`` response. 
+ + Attributes: + notification_channels (Sequence[~.notification.NotificationChannel]): + The notification channels defined for the + specified project. + next_page_token (str): + If not empty, indicates that there may be more results that + match the request. Use the value in the ``page_token`` field + in a subsequent request to fetch the next set of results. If + empty, all results have been returned. + """ + + @property + def raw_page(self): + return self + + notification_channels = proto.RepeatedField( + proto.MESSAGE, number=3, message=notification.NotificationChannel, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class GetNotificationChannelRequest(proto.Message): + r"""The ``GetNotificationChannel`` request. + + Attributes: + name (str): + Required. The channel for which to execute the request. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + """ + + name = proto.Field(proto.STRING, number=3) + + +class UpdateNotificationChannelRequest(proto.Message): + r"""The ``UpdateNotificationChannel`` request. + + Attributes: + update_mask (~.field_mask.FieldMask): + The fields to update. + notification_channel (~.notification.NotificationChannel): + Required. A description of the changes to be applied to the + specified notification channel. The description must provide + a definition for fields to be updated; the names of these + fields should also be included in the ``update_mask``. + """ + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + notification_channel = proto.Field( + proto.MESSAGE, number=3, message=notification.NotificationChannel, + ) + + +class DeleteNotificationChannelRequest(proto.Message): + r"""The ``DeleteNotificationChannel`` request. + + Attributes: + name (str): + Required. The channel for which to execute the request. 
The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + force (bool): + If true, the notification channel will be + deleted regardless of its use in alert policies + (the policies will be updated to remove the + channel). If false, channels that are still + referenced by an existing alerting policy will + fail to be deleted in a delete operation. + """ + + name = proto.Field(proto.STRING, number=3) + + force = proto.Field(proto.BOOL, number=5) + + +class SendNotificationChannelVerificationCodeRequest(proto.Message): + r"""The ``SendNotificationChannelVerificationCode`` request. + + Attributes: + name (str): + Required. The notification channel to which + to send a verification code. + """ + + name = proto.Field(proto.STRING, number=1) + + +class GetNotificationChannelVerificationCodeRequest(proto.Message): + r"""The ``GetNotificationChannelVerificationCode`` request. + + Attributes: + name (str): + Required. The notification channel for which + a verification code is to be generated and + retrieved. This must name a channel that is + already verified; if the specified channel is + not verified, the request will fail. + expire_time (~.timestamp.Timestamp): + The desired expiration time. If specified, + the API will guarantee that the returned code + will not be valid after the specified timestamp; + however, the API cannot guarantee that the + returned code will be valid for at least as long + as the requested time (the API puts an upper + bound on the amount of time for which a code may + be valid). If omitted, a default expiration will + be used, which may be less than the max + permissible expiration (so specifying an + expiration may extend the code's lifetime over + omitting an expiration, even though the API does + impose an upper limit on the maximum expiration + that is permitted). 
+ """ + + name = proto.Field(proto.STRING, number=1) + + expire_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + +class GetNotificationChannelVerificationCodeResponse(proto.Message): + r"""The ``GetNotificationChannelVerificationCode`` request. + + Attributes: + code (str): + The verification code, which may be used to + verify other channels that have an equivalent + identity (i.e. other channels of the same type + with the same fingerprint such as other email + channels with the same email address or other + sms channels with the same number). + expire_time (~.timestamp.Timestamp): + The expiration time associated with the code + that was returned. If an expiration was provided + in the request, this is the minimum of the + requested expiration in the request and the max + permitted expiration. + """ + + code = proto.Field(proto.STRING, number=1) + + expire_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + +class VerifyNotificationChannelRequest(proto.Message): + r"""The ``VerifyNotificationChannel`` request. + + Attributes: + name (str): + Required. The notification channel to verify. + code (str): + Required. The verification code that was delivered to the + channel as a result of invoking the + ``SendNotificationChannelVerificationCode`` API method or + that was retrieved from a verified channel via + ``GetNotificationChannelVerificationCode``. For example, one + might have "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in + general, one is only guaranteed that the code is valid + UTF-8; one should not make any assumptions regarding the + structure or format of the code). 
+ """ + + name = proto.Field(proto.STRING, number=1) + + code = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/monitoring_v3/types/service.py b/google/cloud/monitoring_v3/types/service.py new file mode 100644 index 00000000..755c90dc --- /dev/null +++ b/google/cloud/monitoring_v3/types/service.py @@ -0,0 +1,574 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import duration_pb2 as duration # type: ignore +from google.type import calendar_period_pb2 as gt_calendar_period # type: ignore + + +__protobuf__ = proto.module( + package="google.monitoring.v3", + manifest={ + "Service", + "ServiceLevelObjective", + "ServiceLevelIndicator", + "BasicSli", + "Range", + "RequestBasedSli", + "TimeSeriesRatio", + "DistributionCut", + "WindowsBasedSli", + }, +) + + +class Service(proto.Message): + r"""A ``Service`` is a discrete, autonomous, and network-accessible + unit, designed to solve an individual concern + (`Wikipedia `__). + In Cloud Monitoring, a ``Service`` acts as the root resource under + which operational aspects of the service are accessible. + + Attributes: + name (str): + Resource name for this Service. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + display_name (str): + Name used for UI elements listing this + Service. 
+ custom (~.gm_service.Service.Custom): + Custom service type. + app_engine (~.gm_service.Service.AppEngine): + Type used for App Engine services. + cloud_endpoints (~.gm_service.Service.CloudEndpoints): + Type used for Cloud Endpoints services. + cluster_istio (~.gm_service.Service.ClusterIstio): + Type used for Istio services that live in a + Kubernetes cluster. + mesh_istio (~.gm_service.Service.MeshIstio): + Type used for Istio services scoped to an + Istio mesh. + telemetry (~.gm_service.Service.Telemetry): + Configuration for how to query telemetry on a + Service. + """ + + class Custom(proto.Message): + r"""Custom view of service telemetry. Currently a place-holder + pending final design. + """ + + class AppEngine(proto.Message): + r"""App Engine service. Learn more at + https://cloud.google.com/appengine. + + Attributes: + module_id (str): + The ID of the App Engine module underlying this service. + Corresponds to the ``module_id`` resource label in the + ``gae_app`` monitored resource: + https://cloud.google.com/monitoring/api/resources#tag_gae_app + """ + + module_id = proto.Field(proto.STRING, number=1) + + class CloudEndpoints(proto.Message): + r"""Cloud Endpoints service. Learn more at + https://cloud.google.com/endpoints. + + Attributes: + service (str): + The name of the Cloud Endpoints service underlying this + service. Corresponds to the ``service`` resource label in + the ``api`` monitored resource: + https://cloud.google.com/monitoring/api/resources#tag_api + """ + + service = proto.Field(proto.STRING, number=1) + + class ClusterIstio(proto.Message): + r"""Istio service scoped to a single Kubernetes cluster. Learn + more at http://istio.io. + + Attributes: + location (str): + The location of the Kubernetes cluster in which this Istio + service is defined. Corresponds to the ``location`` resource + label in ``k8s_cluster`` resources. + cluster_name (str): + The name of the Kubernetes cluster in which this Istio + service is defined. 
Corresponds to the ``cluster_name`` + resource label in ``k8s_cluster`` resources. + service_namespace (str): + The namespace of the Istio service underlying this service. + Corresponds to the ``destination_service_namespace`` metric + label in Istio metrics. + service_name (str): + The name of the Istio service underlying this service. + Corresponds to the ``destination_service_name`` metric label + in Istio metrics. + """ + + location = proto.Field(proto.STRING, number=1) + + cluster_name = proto.Field(proto.STRING, number=2) + + service_namespace = proto.Field(proto.STRING, number=3) + + service_name = proto.Field(proto.STRING, number=4) + + class MeshIstio(proto.Message): + r"""Istio service scoped to an Istio mesh + + Attributes: + mesh_uid (str): + Identifier for the mesh in which this Istio service is + defined. Corresponds to the ``mesh_uid`` metric label in + Istio metrics. + service_namespace (str): + The namespace of the Istio service underlying this service. + Corresponds to the ``destination_service_namespace`` metric + label in Istio metrics. + service_name (str): + The name of the Istio service underlying this service. + Corresponds to the ``destination_service_name`` metric label + in Istio metrics. + """ + + mesh_uid = proto.Field(proto.STRING, number=1) + + service_namespace = proto.Field(proto.STRING, number=3) + + service_name = proto.Field(proto.STRING, number=4) + + class Telemetry(proto.Message): + r"""Configuration for how to query telemetry on a Service. + + Attributes: + resource_name (str): + The full name of the resource that defines this service. + Formatted as described in + https://cloud.google.com/apis/design/resource_names. 
+ """ + + resource_name = proto.Field(proto.STRING, number=1) + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + custom = proto.Field(proto.MESSAGE, number=6, oneof="identifier", message=Custom,) + + app_engine = proto.Field( + proto.MESSAGE, number=7, oneof="identifier", message=AppEngine, + ) + + cloud_endpoints = proto.Field( + proto.MESSAGE, number=8, oneof="identifier", message=CloudEndpoints, + ) + + cluster_istio = proto.Field( + proto.MESSAGE, number=9, oneof="identifier", message=ClusterIstio, + ) + + mesh_istio = proto.Field( + proto.MESSAGE, number=10, oneof="identifier", message=MeshIstio, + ) + + telemetry = proto.Field(proto.MESSAGE, number=13, message=Telemetry,) + + +class ServiceLevelObjective(proto.Message): + r"""A Service-Level Objective (SLO) describes a level of desired + good service. It consists of a service-level indicator (SLI), a + performance goal, and a period over which the objective is to be + evaluated against that goal. The SLO can use SLIs defined in a + number of different manners. Typical SLOs might include "99% of + requests in each rolling week have latency below 200 + milliseconds" or "99.5% of requests in each calendar month + return successfully." + + Attributes: + name (str): + Resource name for this ``ServiceLevelObjective``. The format + is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + display_name (str): + Name used for UI elements listing this SLO. + service_level_indicator (~.gm_service.ServiceLevelIndicator): + The definition of good service, used to measure and + calculate the quality of the ``Service``'s performance with + respect to a single aspect of service quality. + goal (float): + The fraction of service that must be good in order for this + objective to be met. ``0 < goal <= 0.999``. + rolling_period (~.duration.Duration): + A rolling time period, semantically "in the past + ````". 
Must be an integer multiple of 1 day + no larger than 30 days. + calendar_period (~.gt_calendar_period.CalendarPeriod): + A calendar period, semantically "since the start of the + current ````". At this time, only ``DAY``, + ``WEEK``, ``FORTNIGHT``, and ``MONTH`` are supported. + """ + + class View(proto.Enum): + r"""``ServiceLevelObjective.View`` determines what form of + ``ServiceLevelObjective`` is returned from + ``GetServiceLevelObjective``, ``ListServiceLevelObjectives``, and + ``ListServiceLevelObjectiveVersions`` RPCs. + """ + VIEW_UNSPECIFIED = 0 + FULL = 2 + EXPLICIT = 1 + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=11) + + service_level_indicator = proto.Field( + proto.MESSAGE, number=3, message="ServiceLevelIndicator", + ) + + goal = proto.Field(proto.DOUBLE, number=4) + + rolling_period = proto.Field( + proto.MESSAGE, number=5, oneof="period", message=duration.Duration, + ) + + calendar_period = proto.Field( + proto.ENUM, number=6, oneof="period", enum=gt_calendar_period.CalendarPeriod, + ) + + +class ServiceLevelIndicator(proto.Message): + r"""A Service-Level Indicator (SLI) describes the "performance" of a + service. For some services, the SLI is well-defined. In such cases, + the SLI can be described easily by referencing the well-known SLI + and providing the needed parameters. Alternatively, a "custom" SLI + can be defined with a query to the underlying metric store. An SLI + is defined to be ``good_service / total_service`` over any queried + time interval. The value of performance always falls into the range + ``0 <= performance <= 1``. A custom SLI describes how to compute + this ratio, whether this is by dividing values from a pair of time + series, cutting a ``Distribution`` into good and bad counts, or + counting time windows in which the service complies with a + criterion. 
For separation of concerns, a single Service-Level + Indicator measures performance for only one aspect of service + quality, such as fraction of successful queries or fast-enough + queries. + + Attributes: + basic_sli (~.gm_service.BasicSli): + Basic SLI on a well-known service type. + request_based (~.gm_service.RequestBasedSli): + Request-based SLIs + windows_based (~.gm_service.WindowsBasedSli): + Windows-based SLIs + """ + + basic_sli = proto.Field(proto.MESSAGE, number=4, oneof="type", message="BasicSli",) + + request_based = proto.Field( + proto.MESSAGE, number=1, oneof="type", message="RequestBasedSli", + ) + + windows_based = proto.Field( + proto.MESSAGE, number=2, oneof="type", message="WindowsBasedSli", + ) + + +class BasicSli(proto.Message): + r"""An SLI measuring performance on a well-known service type. + Performance will be computed on the basis of pre-defined metrics. + The type of the ``service_resource`` determines the metrics to use + and the ``service_resource.labels`` and ``metric_labels`` are used + to construct a monitoring filter to filter that metric down to just + the data relevant to this service. + + Attributes: + method (Sequence[str]): + OPTIONAL: The set of RPCs to which this SLI + is relevant. Telemetry from other methods will + not be used to calculate performance for this + SLI. If omitted, this SLI applies to all the + Service's methods. For service types that don't + support breaking down by method, setting this + field will result in an error. + location (Sequence[str]): + OPTIONAL: The set of locations to which this + SLI is relevant. Telemetry from other locations + will not be used to calculate performance for + this SLI. If omitted, this SLI applies to all + locations in which the Service has activity. For + service types that don't support breaking down + by location, setting this field will result in + an error. + version (Sequence[str]): + OPTIONAL: The set of API versions to which + this SLI is relevant. 
Telemetry from other API + versions will not be used to calculate + performance for this SLI. If omitted, this SLI + applies to all API versions. For service types + that don't support breaking down by version, + setting this field will result in an error. + availability (~.gm_service.BasicSli.AvailabilityCriteria): + Good service is defined to be the count of + requests made to this service that return + successfully. + latency (~.gm_service.BasicSli.LatencyCriteria): + Good service is defined to be the count of requests made to + this service that are fast enough with respect to + ``latency.threshold``. + """ + + class AvailabilityCriteria(proto.Message): + r"""Future parameters for the availability SLI.""" + + class LatencyCriteria(proto.Message): + r"""Parameters for a latency threshold SLI. + + Attributes: + threshold (~.duration.Duration): + Good service is defined to be the count of requests made to + this service that return in no more than ``threshold``. + """ + + threshold = proto.Field(proto.MESSAGE, number=3, message=duration.Duration,) + + method = proto.RepeatedField(proto.STRING, number=7) + + location = proto.RepeatedField(proto.STRING, number=8) + + version = proto.RepeatedField(proto.STRING, number=9) + + availability = proto.Field( + proto.MESSAGE, number=2, oneof="sli_criteria", message=AvailabilityCriteria, + ) + + latency = proto.Field( + proto.MESSAGE, number=3, oneof="sli_criteria", message=LatencyCriteria, + ) + + +class Range(proto.Message): + r"""Range of numerical values, inclusive of ``min`` and exclusive of + ``max``. If the open range "< range.max" is desired, set + ``range.min = -infinity``. If the open range ">= range.min" is + desired, set ``range.max = infinity``. + + Attributes: + min_ (float): + Range minimum. + max_ (float): + Range maximum. 
+ """ + + min_ = proto.Field(proto.DOUBLE, number=1) + + max_ = proto.Field(proto.DOUBLE, number=2) + + +class RequestBasedSli(proto.Message): + r"""Service Level Indicators for which atomic units of service + are counted directly. + + Attributes: + good_total_ratio (~.gm_service.TimeSeriesRatio): + ``good_total_ratio`` is used when the ratio of + ``good_service`` to ``total_service`` is computed from two + ``TimeSeries``. + distribution_cut (~.gm_service.DistributionCut): + ``distribution_cut`` is used when ``good_service`` is a + count of values aggregated in a ``Distribution`` that fall + into a good range. The ``total_service`` is the total count + of all values aggregated in the ``Distribution``. + """ + + good_total_ratio = proto.Field( + proto.MESSAGE, number=1, oneof="method", message="TimeSeriesRatio", + ) + + distribution_cut = proto.Field( + proto.MESSAGE, number=3, oneof="method", message="DistributionCut", + ) + + +class TimeSeriesRatio(proto.Message): + r"""A ``TimeSeriesRatio`` specifies two ``TimeSeries`` to use for + computing the ``good_service / total_service`` ratio. The specified + ``TimeSeries`` must have ``ValueType = DOUBLE`` or + ``ValueType = INT64`` and must have ``MetricKind = DELTA`` or + ``MetricKind = CUMULATIVE``. The ``TimeSeriesRatio`` must specify + exactly two of good, bad, and total, and the relationship + ``good_service + bad_service = total_service`` will be assumed. + + Attributes: + good_service_filter (str): + A `monitoring + filter `__ + specifying a ``TimeSeries`` quantifying good service + provided. Must have ``ValueType = DOUBLE`` or + ``ValueType = INT64`` and must have ``MetricKind = DELTA`` + or ``MetricKind = CUMULATIVE``. + bad_service_filter (str): + A `monitoring + filter `__ + specifying a ``TimeSeries`` quantifying bad service, either + demanded service that was not provided or demanded service + that was of inadequate quality. 
Must have + ``ValueType = DOUBLE`` or ``ValueType = INT64`` and must + have ``MetricKind = DELTA`` or ``MetricKind = CUMULATIVE``. + total_service_filter (str): + A `monitoring + filter `__ + specifying a ``TimeSeries`` quantifying total demanded + service. Must have ``ValueType = DOUBLE`` or + ``ValueType = INT64`` and must have ``MetricKind = DELTA`` + or ``MetricKind = CUMULATIVE``. + """ + + good_service_filter = proto.Field(proto.STRING, number=4) + + bad_service_filter = proto.Field(proto.STRING, number=5) + + total_service_filter = proto.Field(proto.STRING, number=6) + + +class DistributionCut(proto.Message): + r"""A ``DistributionCut`` defines a ``TimeSeries`` and thresholds used + for measuring good service and total service. The ``TimeSeries`` + must have ``ValueType = DISTRIBUTION`` and ``MetricKind = DELTA`` or + ``MetricKind = CUMULATIVE``. The computed ``good_service`` will be + the count of values x in the ``Distribution`` such that + ``range.min <= x < range.max``. + + Attributes: + distribution_filter (str): + A `monitoring + filter `__ + specifying a ``TimeSeries`` aggregating values. Must have + ``ValueType = DISTRIBUTION`` and ``MetricKind = DELTA`` or + ``MetricKind = CUMULATIVE``. + range_ (~.gm_service.Range): + Range of values considered "good." For a one- + ided range, set one bound to an infinite value. + """ + + distribution_filter = proto.Field(proto.STRING, number=4) + + range_ = proto.Field(proto.MESSAGE, number=5, message=Range,) + + +class WindowsBasedSli(proto.Message): + r"""A ``WindowsBasedSli`` defines ``good_service`` as the count of time + windows for which the provided service was of good quality. Criteria + for determining if service was good are embedded in the + ``window_criterion``. + + Attributes: + good_bad_metric_filter (str): + A `monitoring + filter `__ + specifying a ``TimeSeries`` with ``ValueType = BOOL``. The + window is good if any ``true`` values appear in the window. 
+ good_total_ratio_threshold (~.gm_service.WindowsBasedSli.PerformanceThreshold): + A window is good if its ``performance`` is high enough. + metric_mean_in_range (~.gm_service.WindowsBasedSli.MetricRange): + A window is good if the metric's value is in + a good range, averaged across returned streams. + metric_sum_in_range (~.gm_service.WindowsBasedSli.MetricRange): + A window is good if the metric's value is in + a good range, summed across returned streams. + window_period (~.duration.Duration): + Duration over which window quality is evaluated. Must be an + integer fraction of a day and at least ``60s``. + """ + + class PerformanceThreshold(proto.Message): + r"""A ``PerformanceThreshold`` is used when each window is good when + that window has a sufficiently high ``performance``. + + Attributes: + performance (~.gm_service.RequestBasedSli): + ``RequestBasedSli`` to evaluate to judge window quality. + basic_sli_performance (~.gm_service.BasicSli): + ``BasicSli`` to evaluate to judge window quality. + threshold (float): + If window ``performance >= threshold``, the window is + counted as good. + """ + + performance = proto.Field( + proto.MESSAGE, number=1, oneof="type", message=RequestBasedSli, + ) + + basic_sli_performance = proto.Field( + proto.MESSAGE, number=3, oneof="type", message=BasicSli, + ) + + threshold = proto.Field(proto.DOUBLE, number=2) + + class MetricRange(proto.Message): + r"""A ``MetricRange`` is used when each window is good when the value x + of a single ``TimeSeries`` satisfies ``range.min <= x < range.max``. + The provided ``TimeSeries`` must have ``ValueType = INT64`` or + ``ValueType = DOUBLE`` and ``MetricKind = GAUGE``. + + Attributes: + time_series (str): + A `monitoring + filter `__ + specifying the ``TimeSeries`` to use for evaluating window + quality. + range_ (~.gm_service.Range): + Range of values considered "good." For a one- + ided range, set one bound to an infinite value. 
+ """ + + time_series = proto.Field(proto.STRING, number=1) + + range_ = proto.Field(proto.MESSAGE, number=4, message=Range,) + + good_bad_metric_filter = proto.Field( + proto.STRING, number=5, oneof="window_criterion" + ) + + good_total_ratio_threshold = proto.Field( + proto.MESSAGE, number=2, oneof="window_criterion", message=PerformanceThreshold, + ) + + metric_mean_in_range = proto.Field( + proto.MESSAGE, number=6, oneof="window_criterion", message=MetricRange, + ) + + metric_sum_in_range = proto.Field( + proto.MESSAGE, number=7, oneof="window_criterion", message=MetricRange, + ) + + window_period = proto.Field(proto.MESSAGE, number=4, message=duration.Duration,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/monitoring_v3/types/service_service.py b/google/cloud/monitoring_v3/types/service_service.py new file mode 100644 index 00000000..78696748 --- /dev/null +++ b/google/cloud/monitoring_v3/types/service_service.py @@ -0,0 +1,357 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.cloud.monitoring_v3.types import service as gm_service +from google.protobuf import field_mask_pb2 as field_mask # type: ignore + + +__protobuf__ = proto.module( + package="google.monitoring.v3", + manifest={ + "CreateServiceRequest", + "GetServiceRequest", + "ListServicesRequest", + "ListServicesResponse", + "UpdateServiceRequest", + "DeleteServiceRequest", + "CreateServiceLevelObjectiveRequest", + "GetServiceLevelObjectiveRequest", + "ListServiceLevelObjectivesRequest", + "ListServiceLevelObjectivesResponse", + "UpdateServiceLevelObjectiveRequest", + "DeleteServiceLevelObjectiveRequest", + }, +) + + +class CreateServiceRequest(proto.Message): + r"""The ``CreateService`` request. + + Attributes: + parent (str): + Required. Resource name of the parent workspace. The format + is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + service_id (str): + Optional. The Service id to use for this Service. If + omitted, an id will be generated instead. Must match the + pattern ``[a-z0-9\-]+`` + service (~.gm_service.Service): + Required. The ``Service`` to create. + """ + + parent = proto.Field(proto.STRING, number=1) + + service_id = proto.Field(proto.STRING, number=3) + + service = proto.Field(proto.MESSAGE, number=2, message=gm_service.Service,) + + +class GetServiceRequest(proto.Message): + r"""The ``GetService`` request. + + Attributes: + name (str): + Required. Resource name of the ``Service``. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListServicesRequest(proto.Message): + r"""The ``ListServices`` request. + + Attributes: + parent (str): + Required. Resource name of the parent containing the listed + services, either a project or a Monitoring Workspace. The + formats are: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + workspaces/[HOST_PROJECT_ID_OR_NUMBER] + filter (str): + A filter specifying what ``Service``\ s to return. 
The + filter currently supports the following fields: + + :: + + - `identifier_case` + - `app_engine.module_id` + - `cloud_endpoints.service` + - `cluster_istio.location` + - `cluster_istio.cluster_name` + - `cluster_istio.service_namespace` + - `cluster_istio.service_name` + + ``identifier_case`` refers to which option in the identifier + oneof is populated. For example, the filter + ``identifier_case = "CUSTOM"`` would match all services with + a value for the ``custom`` field. Valid options are + "CUSTOM", "APP_ENGINE", "CLOUD_ENDPOINTS", and + "CLUSTER_ISTIO". + page_size (int): + A non-negative number that is the maximum + number of results to return. When 0, use default + page size. + page_token (str): + If this field is not empty then it must contain the + ``nextPageToken`` value returned by a previous call to this + method. Using this field causes the method to return + additional results from the previous method call. + """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + +class ListServicesResponse(proto.Message): + r"""The ``ListServices`` response. + + Attributes: + services (Sequence[~.gm_service.Service]): + The ``Service``\ s matching the specified filter. + next_page_token (str): + If there are more results than have been returned, then this + field is set to a non-empty value. To see the additional + results, use that value as ``page_token`` in the next call + to this method. + """ + + @property + def raw_page(self): + return self + + services = proto.RepeatedField(proto.MESSAGE, number=1, message=gm_service.Service,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateServiceRequest(proto.Message): + r"""The ``UpdateService`` request. + + Attributes: + service (~.gm_service.Service): + Required. The ``Service`` to draw updates from. 
The given + ``name`` specifies the resource to update. + update_mask (~.field_mask.FieldMask): + A set of field paths defining which fields to + use for the update. + """ + + service = proto.Field(proto.MESSAGE, number=1, message=gm_service.Service,) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + +class DeleteServiceRequest(proto.Message): + r"""The ``DeleteService`` request. + + Attributes: + name (str): + Required. Resource name of the ``Service`` to delete. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + """ + + name = proto.Field(proto.STRING, number=1) + + +class CreateServiceLevelObjectiveRequest(proto.Message): + r"""The ``CreateServiceLevelObjective`` request. + + Attributes: + parent (str): + Required. Resource name of the parent ``Service``. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + service_level_objective_id (str): + Optional. The ServiceLevelObjective id to use for this + ServiceLevelObjective. If omitted, an id will be generated + instead. Must match the pattern ``[a-z0-9\-]+`` + service_level_objective (~.gm_service.ServiceLevelObjective): + Required. The ``ServiceLevelObjective`` to create. The + provided ``name`` will be respected if no + ``ServiceLevelObjective`` exists with this name. + """ + + parent = proto.Field(proto.STRING, number=1) + + service_level_objective_id = proto.Field(proto.STRING, number=3) + + service_level_objective = proto.Field( + proto.MESSAGE, number=2, message=gm_service.ServiceLevelObjective, + ) + + +class GetServiceLevelObjectiveRequest(proto.Message): + r"""The ``GetServiceLevelObjective`` request. + + Attributes: + name (str): + Required. Resource name of the ``ServiceLevelObjective`` to + get. 
The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + view (~.gm_service.ServiceLevelObjective.View): + View of the ``ServiceLevelObjective`` to return. If + ``DEFAULT``, return the ``ServiceLevelObjective`` as + originally defined. If ``EXPLICIT`` and the + ``ServiceLevelObjective`` is defined in terms of a + ``BasicSli``, replace the ``BasicSli`` with a + ``RequestBasedSli`` spelling out how the SLI is computed. + """ + + name = proto.Field(proto.STRING, number=1) + + view = proto.Field( + proto.ENUM, number=2, enum=gm_service.ServiceLevelObjective.View, + ) + + +class ListServiceLevelObjectivesRequest(proto.Message): + r"""The ``ListServiceLevelObjectives`` request. + + Attributes: + parent (str): + Required. Resource name of the parent containing the listed + SLOs, either a project or a Monitoring Workspace. The + formats are: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/- + filter (str): + A filter specifying what ``ServiceLevelObjective``\ s to + return. + page_size (int): + A non-negative number that is the maximum + number of results to return. When 0, use default + page size. + page_token (str): + If this field is not empty then it must contain the + ``nextPageToken`` value returned by a previous call to this + method. Using this field causes the method to return + additional results from the previous method call. + view (~.gm_service.ServiceLevelObjective.View): + View of the ``ServiceLevelObjective``\ s to return. If + ``DEFAULT``, return each ``ServiceLevelObjective`` as + originally defined. If ``EXPLICIT`` and the + ``ServiceLevelObjective`` is defined in terms of a + ``BasicSli``, replace the ``BasicSli`` with a + ``RequestBasedSli`` spelling out how the SLI is computed. 
+ """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + view = proto.Field( + proto.ENUM, number=5, enum=gm_service.ServiceLevelObjective.View, + ) + + +class ListServiceLevelObjectivesResponse(proto.Message): + r"""The ``ListServiceLevelObjectives`` response. + + Attributes: + service_level_objectives (Sequence[~.gm_service.ServiceLevelObjective]): + The ``ServiceLevelObjective``\ s matching the specified + filter. + next_page_token (str): + If there are more results than have been returned, then this + field is set to a non-empty value. To see the additional + results, use that value as ``page_token`` in the next call + to this method. + """ + + @property + def raw_page(self): + return self + + service_level_objectives = proto.RepeatedField( + proto.MESSAGE, number=1, message=gm_service.ServiceLevelObjective, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateServiceLevelObjectiveRequest(proto.Message): + r"""The ``UpdateServiceLevelObjective`` request. + + Attributes: + service_level_objective (~.gm_service.ServiceLevelObjective): + Required. The ``ServiceLevelObjective`` to draw updates + from. The given ``name`` specifies the resource to update. + update_mask (~.field_mask.FieldMask): + A set of field paths defining which fields to + use for the update. + """ + + service_level_objective = proto.Field( + proto.MESSAGE, number=1, message=gm_service.ServiceLevelObjective, + ) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + +class DeleteServiceLevelObjectiveRequest(proto.Message): + r"""The ``DeleteServiceLevelObjective`` request. + + Attributes: + name (str): + Required. Resource name of the ``ServiceLevelObjective`` to + delete. 
The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + """ + + name = proto.Field(proto.STRING, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/monitoring_v3/types/span_context.py b/google/cloud/monitoring_v3/types/span_context.py new file mode 100644 index 00000000..dc7891b8 --- /dev/null +++ b/google/cloud/monitoring_v3/types/span_context.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module(package="google.monitoring.v3", manifest={"SpanContext",},) + + +class SpanContext(proto.Message): + r"""The context of a span, attached to + [Exemplars][google.api.Distribution.Exemplars] in + [Distribution][google.api.Distribution] values during aggregation. + + It contains the name of a span with format: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] + + Attributes: + span_name (str): + The resource name of the span. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] + + ``[TRACE_ID]`` is a unique identifier for a trace within a + project; it is a 32-character hexadecimal encoding of a + 16-byte array. + + ``[SPAN_ID]`` is a unique identifier for a span within a + trace; it is a 16-character hexadecimal encoding of an + 8-byte array. 
+ """ + + span_name = proto.Field(proto.STRING, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/monitoring_v3/types/uptime.py b/google/cloud/monitoring_v3/types/uptime.py new file mode 100644 index 00000000..69ccc777 --- /dev/null +++ b/google/cloud/monitoring_v3/types/uptime.py @@ -0,0 +1,451 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.api import monitored_resource_pb2 as ga_monitored_resource # type: ignore +from google.protobuf import duration_pb2 as duration # type: ignore + + +__protobuf__ = proto.module( + package="google.monitoring.v3", + manifest={ + "UptimeCheckRegion", + "GroupResourceType", + "InternalChecker", + "UptimeCheckConfig", + "UptimeCheckIp", + }, +) + + +class UptimeCheckRegion(proto.Enum): + r"""The regions from which an Uptime check can be run.""" + REGION_UNSPECIFIED = 0 + USA = 1 + EUROPE = 2 + SOUTH_AMERICA = 3 + ASIA_PACIFIC = 4 + + +class GroupResourceType(proto.Enum): + r"""The supported resource types that can be used as values of + ``group_resource.resource_type``. ``INSTANCE`` includes + ``gce_instance`` and ``aws_ec2_instance`` resource types. The + resource types ``gae_app`` and ``uptime_url`` are not valid here + because group checks on App Engine modules and URLs are not allowed. 
+ """ + RESOURCE_TYPE_UNSPECIFIED = 0 + INSTANCE = 1 + AWS_ELB_LOAD_BALANCER = 2 + + +class InternalChecker(proto.Message): + r"""An internal checker allows Uptime checks to run on + private/internal GCP resources. + + Attributes: + name (str): + A unique resource name for this InternalChecker. The format + is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/internalCheckers/[INTERNAL_CHECKER_ID] + + ``[PROJECT_ID_OR_NUMBER]`` is the Stackdriver Workspace + project for the Uptime check config associated with the + internal checker. + display_name (str): + The checker's human-readable name. The + display name should be unique within a + Stackdriver Workspace in order to make it easier + to identify; however, uniqueness is not + enforced. + network (str): + The `GCP VPC + network `__ where the + internal resource lives (ex: "default"). + gcp_zone (str): + The GCP zone the Uptime check should egress from. Only + respected for internal Uptime checks, where internal_network + is specified. + peer_project_id (str): + The GCP project ID where the internal checker + lives. Not necessary the same as the Workspace + project. + state (~.uptime.InternalChecker.State): + The current operational state of the internal + checker. + """ + + class State(proto.Enum): + r"""Operational states for an internal checker.""" + UNSPECIFIED = 0 + CREATING = 1 + RUNNING = 2 + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + network = proto.Field(proto.STRING, number=3) + + gcp_zone = proto.Field(proto.STRING, number=4) + + peer_project_id = proto.Field(proto.STRING, number=6) + + state = proto.Field(proto.ENUM, number=7, enum=State,) + + +class UptimeCheckConfig(proto.Message): + r"""This message configures which resources and services to + monitor for availability. + + Attributes: + name (str): + A unique resource name for this Uptime check configuration. 
+ The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + + This field should be omitted when creating the Uptime check + configuration; on create, the resource name is assigned by + the server and included in the response. + display_name (str): + A human-friendly name for the Uptime check + configuration. The display name should be unique + within a Stackdriver Workspace in order to make + it easier to identify; however, uniqueness is + not enforced. Required. + monitored_resource (~.ga_monitored_resource.MonitoredResource): + The `monitored + resource `__ + associated with the configuration. The following monitored + resource types are supported for Uptime checks: + ``uptime_url``, ``gce_instance``, ``gae_app``, + ``aws_ec2_instance``, ``aws_elb_load_balancer`` + resource_group (~.uptime.UptimeCheckConfig.ResourceGroup): + The group resource associated with the + configuration. + http_check (~.uptime.UptimeCheckConfig.HttpCheck): + Contains information needed to make an HTTP + or HTTPS check. + tcp_check (~.uptime.UptimeCheckConfig.TcpCheck): + Contains information needed to make a TCP + check. + period (~.duration.Duration): + How often, in seconds, the Uptime check is performed. + Currently, the only supported values are ``60s`` (1 minute), + ``300s`` (5 minutes), ``600s`` (10 minutes), and ``900s`` + (15 minutes). Optional, defaults to ``60s``. + timeout (~.duration.Duration): + The maximum amount of time to wait for the + request to complete (must be between 1 and 60 + seconds). Required. + content_matchers (Sequence[~.uptime.UptimeCheckConfig.ContentMatcher]): + The content that is expected to appear in the data returned + by the target server against which the check is run. + Currently, only the first entry in the ``content_matchers`` + list is supported, and additional entries will be ignored. + This field is optional and should only be specified if a + content match is required as part of the/ Uptime check. 
+ selected_regions (Sequence[~.uptime.UptimeCheckRegion]): + The list of regions from which the check will + be run. Some regions contain one location, and + others contain more than one. If this field is + specified, enough regions must be provided to + include a minimum of 3 locations. Not + specifying this field will result in Uptime + checks running from all available regions. + is_internal (bool): + If this is ``true``, then checks are made only from the + 'internal_checkers'. If it is ``false``, then checks are + made only from the 'selected_regions'. It is an error to + provide 'selected_regions' when is_internal is ``true``, or + to provide 'internal_checkers' when is_internal is + ``false``. + internal_checkers (Sequence[~.uptime.InternalChecker]): + The internal checkers that this check will egress from. If + ``is_internal`` is ``true`` and this list is empty, the + check will egress from all the InternalCheckers configured + for the project that owns this ``UptimeCheckConfig``. + """ + + class ResourceGroup(proto.Message): + r"""The resource submessage for group checks. It can be used + instead of a monitored resource, when multiple resources are + being monitored. + + Attributes: + group_id (str): + The group of resources being monitored. Should be only the + ``[GROUP_ID]``, and not the full-path + ``projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]``. + resource_type (~.uptime.GroupResourceType): + The resource type of the group members. + """ + + group_id = proto.Field(proto.STRING, number=1) + + resource_type = proto.Field(proto.ENUM, number=2, enum="GroupResourceType",) + + class HttpCheck(proto.Message): + r"""Information involved in an HTTP/HTTPS Uptime check request. + + Attributes: + request_method (~.uptime.UptimeCheckConfig.HttpCheck.RequestMethod): + The HTTP request method to use for the check. + use_ssl (bool): + If ``true``, use HTTPS instead of HTTP to run the check. + path (str): + Optional (defaults to "/"). 
The path to the page against + which to run the check. Will be combined with the ``host`` + (specified within the ``monitored_resource``) and ``port`` + to construct the full URL. If the provided path does not + begin with "/", a "/" will be prepended automatically. + port (int): + Optional (defaults to 80 when ``use_ssl`` is ``false``, and + 443 when ``use_ssl`` is ``true``). The TCP port on the HTTP + server against which to run the check. Will be combined with + host (specified within the ``monitored_resource``) and + ``path`` to construct the full URL. + auth_info (~.uptime.UptimeCheckConfig.HttpCheck.BasicAuthentication): + The authentication information. Optional when + creating an HTTP check; defaults to empty. + mask_headers (bool): + Boolean specifiying whether to encrypt the header + information. Encryption should be specified for any headers + related to authentication that you do not wish to be seen + when retrieving the configuration. The server will be + responsible for encrypting the headers. On Get/List calls, + if ``mask_headers`` is set to ``true`` then the headers will + be obscured with ``******.`` + headers (Sequence[~.uptime.UptimeCheckConfig.HttpCheck.HeadersEntry]): + The list of headers to send as part of the + Uptime check request. If two headers have the + same key and different values, they should be + entered as a single header, with the value being + a comma-separated list of all the desired values + as described at + https://www.w3.org/Protocols/rfc2616/rfc2616.txt + (page 31). Entering two separate headers with + the same key in a Create call will cause the + first to be overwritten by the second. The + maximum number of headers allowed is 100. + content_type (~.uptime.UptimeCheckConfig.HttpCheck.ContentType): + The content type to use for the check. + validate_ssl (bool): + Boolean specifying whether to include SSL certificate + validation as a part of the Uptime check. 
Only applies to + checks where ``monitored_resource`` is set to + ``uptime_url``. If ``use_ssl`` is ``false``, setting + ``validate_ssl`` to ``true`` has no effect. + body (bytes): + The request body associated with the HTTP request. If + ``content_type`` is ``URL_ENCODED``, the body passed in must + be URL-encoded. Users can provide a ``Content-Length`` + header via the ``headers`` field or the API will do so. The + maximum byte size is 1 megabyte. Note: As with all ``bytes`` + fields JSON representations are base64 encoded. + """ + + class RequestMethod(proto.Enum): + r"""The HTTP request method options.""" + METHOD_UNSPECIFIED = 0 + GET = 1 + POST = 2 + + class ContentType(proto.Enum): + r"""Header options corresponding to the Content-Type of the body in HTTP + requests. Note that a ``Content-Type`` header cannot be present in + the ``headers`` field if this field is specified. + """ + TYPE_UNSPECIFIED = 0 + URL_ENCODED = 1 + + class BasicAuthentication(proto.Message): + r"""The authentication parameters to provide to the specified resource + or URL that requires a username and password. Currently, only `Basic + HTTP authentication `__ is + supported in Uptime checks. + + Attributes: + username (str): + The username to use when authenticating with + the HTTP server. + password (str): + The password to use when authenticating with + the HTTP server. 
+ """ + + username = proto.Field(proto.STRING, number=1) + + password = proto.Field(proto.STRING, number=2) + + request_method = proto.Field( + proto.ENUM, number=8, enum="UptimeCheckConfig.HttpCheck.RequestMethod", + ) + + use_ssl = proto.Field(proto.BOOL, number=1) + + path = proto.Field(proto.STRING, number=2) + + port = proto.Field(proto.INT32, number=3) + + auth_info = proto.Field( + proto.MESSAGE, + number=4, + message="UptimeCheckConfig.HttpCheck.BasicAuthentication", + ) + + mask_headers = proto.Field(proto.BOOL, number=5) + + headers = proto.MapField(proto.STRING, proto.STRING, number=6) + + content_type = proto.Field( + proto.ENUM, number=9, enum="UptimeCheckConfig.HttpCheck.ContentType", + ) + + validate_ssl = proto.Field(proto.BOOL, number=7) + + body = proto.Field(proto.BYTES, number=10) + + class TcpCheck(proto.Message): + r"""Information required for a TCP Uptime check request. + + Attributes: + port (int): + The TCP port on the server against which to run the check. + Will be combined with host (specified within the + ``monitored_resource``) to construct the full URL. Required. + """ + + port = proto.Field(proto.INT32, number=1) + + class ContentMatcher(proto.Message): + r"""Optional. Used to perform content matching. This allows + matching based on substrings and regular expressions, together + with their negations. Only the first 4 MB of an HTTP or + HTTPS check's response (and the first 1 MB of a TCP check's + response) are examined for purposes of content matching. + + Attributes: + content (str): + String or regex content to match. Maximum 1024 bytes. An + empty ``content`` string indicates no content matching is to + be performed. + matcher (~.uptime.UptimeCheckConfig.ContentMatcher.ContentMatcherOption): + The type of content matcher that will be applied to the + server output, compared to the ``content`` string when the + check is run. 
+ """ + + class ContentMatcherOption(proto.Enum): + r"""Options to perform content matching.""" + CONTENT_MATCHER_OPTION_UNSPECIFIED = 0 + CONTAINS_STRING = 1 + NOT_CONTAINS_STRING = 2 + MATCHES_REGEX = 3 + NOT_MATCHES_REGEX = 4 + + content = proto.Field(proto.STRING, number=1) + + matcher = proto.Field( + proto.ENUM, + number=2, + enum="UptimeCheckConfig.ContentMatcher.ContentMatcherOption", + ) + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + monitored_resource = proto.Field( + proto.MESSAGE, + number=3, + oneof="resource", + message=ga_monitored_resource.MonitoredResource, + ) + + resource_group = proto.Field( + proto.MESSAGE, number=4, oneof="resource", message=ResourceGroup, + ) + + http_check = proto.Field( + proto.MESSAGE, number=5, oneof="check_request_type", message=HttpCheck, + ) + + tcp_check = proto.Field( + proto.MESSAGE, number=6, oneof="check_request_type", message=TcpCheck, + ) + + period = proto.Field(proto.MESSAGE, number=7, message=duration.Duration,) + + timeout = proto.Field(proto.MESSAGE, number=8, message=duration.Duration,) + + content_matchers = proto.RepeatedField( + proto.MESSAGE, number=9, message=ContentMatcher, + ) + + selected_regions = proto.RepeatedField( + proto.ENUM, number=10, enum="UptimeCheckRegion", + ) + + is_internal = proto.Field(proto.BOOL, number=15) + + internal_checkers = proto.RepeatedField( + proto.MESSAGE, number=14, message=InternalChecker, + ) + + +class UptimeCheckIp(proto.Message): + r"""Contains the region, location, and list of IP + addresses where checkers in the location run from. + + Attributes: + region (~.uptime.UptimeCheckRegion): + A broad region category in which the IP + address is located. + location (str): + A more specific location within the region + that typically encodes a particular + city/town/metro (and its containing + state/province or country) within the broader + umbrella region category. 
+ ip_address (str): + The IP address from which the Uptime check + originates. This is a fully specified IP address + (not an IP address range). Most IP addresses, as + of this publication, are in IPv4 format; + however, one should not rely on the IP addresses + being in IPv4 format indefinitely, and should + support interpreting this field in either IPv4 + or IPv6 format. + """ + + region = proto.Field(proto.ENUM, number=1, enum="UptimeCheckRegion",) + + location = proto.Field(proto.STRING, number=2) + + ip_address = proto.Field(proto.STRING, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/monitoring_v3/types/uptime_service.py b/google/cloud/monitoring_v3/types/uptime_service.py new file mode 100644 index 00000000..405bf976 --- /dev/null +++ b/google/cloud/monitoring_v3/types/uptime_service.py @@ -0,0 +1,241 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.cloud.monitoring_v3.types import uptime +from google.protobuf import field_mask_pb2 as field_mask # type: ignore + + +__protobuf__ = proto.module( + package="google.monitoring.v3", + manifest={ + "ListUptimeCheckConfigsRequest", + "ListUptimeCheckConfigsResponse", + "GetUptimeCheckConfigRequest", + "CreateUptimeCheckConfigRequest", + "UpdateUptimeCheckConfigRequest", + "DeleteUptimeCheckConfigRequest", + "ListUptimeCheckIpsRequest", + "ListUptimeCheckIpsResponse", + }, +) + + +class ListUptimeCheckConfigsRequest(proto.Message): + r"""The protocol for the ``ListUptimeCheckConfigs`` request. + + Attributes: + parent (str): + Required. The project whose Uptime check configurations are + listed. The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + page_size (int): + The maximum number of results to return in a single + response. The server may further constrain the maximum + number of results returned in a single page. If the + page_size is <=0, the server will decide the number of + results to be returned. + page_token (str): + If this field is not empty then it must contain the + ``nextPageToken`` value returned by a previous call to this + method. Using this field causes the method to return more + results from the previous method call. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + +class ListUptimeCheckConfigsResponse(proto.Message): + r"""The protocol for the ``ListUptimeCheckConfigs`` response. + + Attributes: + uptime_check_configs (Sequence[~.uptime.UptimeCheckConfig]): + The returned Uptime check configurations. + next_page_token (str): + This field represents the pagination token to retrieve the + next page of results. If the value is empty, it means no + further results for the request. 
To retrieve the next page + of results, the value of the next_page_token is passed to + the subsequent List method call (in the request message's + page_token field). + total_size (int): + The total number of Uptime check + configurations for the project, irrespective of + any pagination. + """ + + @property + def raw_page(self): + return self + + uptime_check_configs = proto.RepeatedField( + proto.MESSAGE, number=1, message=uptime.UptimeCheckConfig, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + total_size = proto.Field(proto.INT32, number=3) + + +class GetUptimeCheckConfigRequest(proto.Message): + r"""The protocol for the ``GetUptimeCheckConfig`` request. + + Attributes: + name (str): + Required. The Uptime check configuration to retrieve. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + """ + + name = proto.Field(proto.STRING, number=1) + + +class CreateUptimeCheckConfigRequest(proto.Message): + r"""The protocol for the ``CreateUptimeCheckConfig`` request. + + Attributes: + parent (str): + Required. The project in which to create the Uptime check. + The format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER] + uptime_check_config (~.uptime.UptimeCheckConfig): + Required. The new Uptime check configuration. + """ + + parent = proto.Field(proto.STRING, number=1) + + uptime_check_config = proto.Field( + proto.MESSAGE, number=2, message=uptime.UptimeCheckConfig, + ) + + +class UpdateUptimeCheckConfigRequest(proto.Message): + r"""The protocol for the ``UpdateUptimeCheckConfig`` request. + + Attributes: + update_mask (~.field_mask.FieldMask): + Optional. If present, only the listed fields + in the current Uptime check configuration are + updated with values from the new configuration. + If this field is empty, then the current + configuration is completely replaced with the + new configuration. + uptime_check_config (~.uptime.UptimeCheckConfig): + Required. 
If an ``updateMask`` has been specified, this + field gives the values for the set of fields mentioned in + the ``updateMask``. If an ``updateMask`` has not been given, + this Uptime check configuration replaces the current + configuration. If a field is mentioned in ``updateMask`` but + the corresponding field is omitted in this partial Uptime + check configuration, it has the effect of deleting/clearing + the field from the configuration on the server. + + The following fields can be updated: ``display_name``, + ``http_check``, ``tcp_check``, ``timeout``, + ``content_matchers``, and ``selected_regions``. + """ + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + uptime_check_config = proto.Field( + proto.MESSAGE, number=3, message=uptime.UptimeCheckConfig, + ) + + +class DeleteUptimeCheckConfigRequest(proto.Message): + r"""The protocol for the ``DeleteUptimeCheckConfig`` request. + + Attributes: + name (str): + Required. The Uptime check configuration to delete. The + format is: + + :: + + projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListUptimeCheckIpsRequest(proto.Message): + r"""The protocol for the ``ListUptimeCheckIps`` request. + + Attributes: + page_size (int): + The maximum number of results to return in a single + response. The server may further constrain the maximum + number of results returned in a single page. If the + page_size is <=0, the server will decide the number of + results to be returned. NOTE: this field is not yet + implemented + page_token (str): + If this field is not empty then it must contain the + ``nextPageToken`` value returned by a previous call to this + method. Using this field causes the method to return more + results from the previous method call. 
NOTE: this field is + not yet implemented + """ + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + +class ListUptimeCheckIpsResponse(proto.Message): + r"""The protocol for the ``ListUptimeCheckIps`` response. + + Attributes: + uptime_check_ips (Sequence[~.uptime.UptimeCheckIp]): + The returned list of IP addresses (including + region and location) that the checkers run from. + next_page_token (str): + This field represents the pagination token to retrieve the + next page of results. If the value is empty, it means no + further results for the request. To retrieve the next page + of results, the value of the next_page_token is passed to + the subsequent List method call (in the request message's + page_token field). NOTE: this field is not yet implemented + """ + + @property + def raw_page(self): + return self + + uptime_check_ips = proto.RepeatedField( + proto.MESSAGE, number=1, message=uptime.UptimeCheckIp, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 00000000..4505b485 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/noxfile.py b/noxfile.py index 481973cc..a1f04317 100644 --- a/noxfile.py +++ b/noxfile.py @@ -27,8 +27,8 @@ BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] DEFAULT_PYTHON_VERSION = "3.8" -SYSTEM_TEST_PYTHON_VERSIONS = ["2.7", "3.8"] -UNIT_TEST_PYTHON_VERSIONS = ["2.7", "3.5", "3.6", "3.7", "3.8"] +SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] +UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"] @nox.session(python=DEFAULT_PYTHON_VERSION) @@ -70,6 +70,8 @@ def lint_setup_py(session): def default(session): # Install all test dependencies, then install this package in-place. 
+ session.install("asyncmock", "pytest-asyncio") + session.install("mock", "pytest", "pytest-cov") session.install("-e", ".") @@ -139,7 +141,7 @@ def cover(session): test runs (not system test runs), and then erases coverage data. """ session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=92") + session.run("coverage", "report", "--show-missing", "--fail-under=99") session.run("coverage", "erase") @@ -149,12 +151,12 @@ def docs(session): """Build the docs for this library.""" session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") + session.install("sphinx", "alabaster", "recommonmark") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( "sphinx-build", - "-W", # warnings as errors + # "-W", # warnings as errors "-T", # show full traceback on exception "-N", # no colors "-b", diff --git a/samples/snippets/v3/alerts-client/snippets.py b/samples/snippets/v3/alerts-client/snippets.py index 80254232..fa9c2d9f 100644 --- a/samples/snippets/v3/alerts-client/snippets.py +++ b/samples/snippets/v3/alerts-client/snippets.py @@ -18,28 +18,40 @@ import json import os +import google.api_core.exceptions from google.cloud import monitoring_v3 -import google.protobuf.json_format +from google.protobuf import field_mask_pb2 as field_mask +import proto import tabulate # [START monitoring_alert_list_policies] def list_alert_policies(project_name): client = monitoring_v3.AlertPolicyServiceClient() - policies = client.list_alert_policies(project_name) - print(tabulate.tabulate( - [(policy.name, policy.display_name) for policy in policies], - ('name', 'display_name'))) + policies = client.list_alert_policies(name=project_name) + print( + tabulate.tabulate( + [(policy.name, policy.display_name) for policy in policies], + ("name", "display_name"), + ) + ) + + # [END monitoring_alert_list_policies] # [START monitoring_alert_list_channels] def 
list_notification_channels(project_name): client = monitoring_v3.NotificationChannelServiceClient() - channels = client.list_notification_channels(project_name) - print(tabulate.tabulate( - [(channel.name, channel.display_name) for channel in channels], - ('name', 'display_name'))) + channels = client.list_notification_channels(name=project_name) + print( + tabulate.tabulate( + [(channel.name, channel.display_name) for channel in channels], + ("name", "display_name"), + ) + ) + + # [END monitoring_alert_list_channels] @@ -56,37 +68,50 @@ def enable_alert_policies(project_name, enable, filter_=None): """ client = monitoring_v3.AlertPolicyServiceClient() - policies = client.list_alert_policies(project_name, filter_=filter_) + policies = client.list_alert_policies( + request={"name": project_name, "filter": filter_} + ) for policy in policies: - if bool(enable) == policy.enabled.value: - print('Policy', policy.name, 'is already', - 'enabled' if policy.enabled.value else 'disabled') + if bool(enable) == policy.enabled: + print( + "Policy", + policy.name, + "is already", + "enabled" if policy.enabled else "disabled", + ) else: - policy.enabled.value = bool(enable) - mask = monitoring_v3.types.field_mask_pb2.FieldMask() - mask.paths.append('enabled') - client.update_alert_policy(policy, mask) - print('Enabled' if enable else 'Disabled', policy.name) + policy.enabled = bool(enable) + mask = field_mask.FieldMask() + mask.paths.append("enabled") + client.update_alert_policy(alert_policy=policy, update_mask=mask) + print("Enabled" if enable else "Disabled", policy.name) + + # [END monitoring_alert_enable_policies] # [START monitoring_alert_replace_channels] def replace_notification_channels(project_name, alert_policy_id, channel_ids): - _, project_id = project_name.split('/') + _, project_id = project_name.split("/") alert_client = monitoring_v3.AlertPolicyServiceClient() channel_client = monitoring_v3.NotificationChannelServiceClient() - policy = 
monitoring_v3.types.alert_pb2.AlertPolicy() + policy = monitoring_v3.AlertPolicy() policy.name = alert_client.alert_policy_path(project_id, alert_policy_id) for channel_id in channel_ids: policy.notification_channels.append( - channel_client.notification_channel_path(project_id, channel_id)) + channel_client.notification_channel_path(project_id, channel_id) + ) + + mask = field_mask.FieldMask() + mask.paths.append("notification_channels") + updated_policy = alert_client.update_alert_policy( + alert_policy=policy, update_mask=mask + ) + print("Updated", updated_policy.name) + - mask = monitoring_v3.types.field_mask_pb2.FieldMask() - mask.paths.append('notification_channels') - updated_policy = alert_client.update_alert_policy(policy, mask) - print('Updated', updated_policy.name) # [END monitoring_alert_replace_channels] @@ -94,16 +119,16 @@ def replace_notification_channels(project_name, alert_policy_id, channel_ids): def delete_notification_channels(project_name, channel_ids, force=None): channel_client = monitoring_v3.NotificationChannelServiceClient() for channel_id in channel_ids: - channel_name = '{}/notificationChannels/{}'.format( - project_name, channel_id) + channel_name = "{}/notificationChannels/{}".format(project_name, channel_id) try: - channel_client.delete_notification_channel( - channel_name, force=force) - print('Channel {} deleted'.format(channel_name)) + channel_client.delete_notification_channel(name=channel_name, force=force) + print("Channel {} deleted".format(channel_name)) except ValueError: - print('The parameters are invalid') + print("The parameters are invalid") except Exception as e: - print('API call failed: {}'.format(e)) + print("API call failed: {}".format(e)) + + # [END monitoring_alert_delete_channel] @@ -111,25 +136,29 @@ def delete_notification_channels(project_name, channel_ids, force=None): def backup(project_name, backup_filename): alert_client = monitoring_v3.AlertPolicyServiceClient() channel_client = 
monitoring_v3.NotificationChannelServiceClient() - record = {'project_name': project_name, - 'policies': list(alert_client.list_alert_policies(project_name)), - 'channels': list(channel_client.list_notification_channels( - project_name))} - json.dump(record, open(backup_filename, 'wt'), cls=ProtoEncoder, indent=2) - print('Backed up alert policies and notification channels to {}.'.format( - backup_filename) + record = { + "project_name": project_name, + "policies": list(alert_client.list_alert_policies(name=project_name)), + "channels": list(channel_client.list_notification_channels(name=project_name)), + } + json.dump(record, open(backup_filename, "wt"), cls=ProtoEncoder, indent=2) + print( + "Backed up alert policies and notification channels to {}.".format( + backup_filename + ) ) class ProtoEncoder(json.JSONEncoder): - """Uses google.protobuf.json_format to encode protobufs as json.""" + """Encode protobufs as json.""" + def default(self, obj): - if type(obj) in (monitoring_v3.types.alert_pb2.AlertPolicy, - monitoring_v3.types.notification_pb2. 
- NotificationChannel): - text = google.protobuf.json_format.MessageToJson(obj) + if type(obj) in (monitoring_v3.AlertPolicy, monitoring_v3.NotificationChannel): + text = proto.Message.to_json(obj) return json.loads(text) return super(ProtoEncoder, self).default(obj) + + # [END monitoring_alert_backup_policies] @@ -139,21 +168,25 @@ def default(self, obj): # [START monitoring_alert_update_channel] # [START monitoring_alert_enable_channel] def restore(project_name, backup_filename): - print('Loading alert policies and notification channels from {}.'.format( - backup_filename) + print( + "Loading alert policies and notification channels from {}.".format( + backup_filename + ) ) - record = json.load(open(backup_filename, 'rt')) - is_same_project = project_name == record['project_name'] + record = json.load(open(backup_filename, "rt")) + is_same_project = project_name == record["project_name"] # Convert dicts to AlertPolicies. - policies_json = [json.dumps(policy) for policy in record['policies']] - policies = [google.protobuf.json_format.Parse( - policy_json, monitoring_v3.types.alert_pb2.AlertPolicy()) - for policy_json in policies_json] + policies_json = [json.dumps(policy) for policy in record["policies"]] + policies = [ + monitoring_v3.AlertPolicy.from_json(policy_json) + for policy_json in policies_json + ] # Convert dicts to NotificationChannels - channels_json = [json.dumps(channel) for channel in record['channels']] - channels = [google.protobuf.json_format.Parse( - channel_json, monitoring_v3.types.notification_pb2. - NotificationChannel()) for channel_json in channels_json] + channels_json = [json.dumps(channel) for channel in record["channels"]] + channels = [ + monitoring_v3.NotificationChannel.from_json(channel_json) + for channel_json in channels_json + ] # Restore the channels. 
channel_client = monitoring_v3.NotificationChannelServiceClient() @@ -161,16 +194,17 @@ def restore(project_name, backup_filename): for channel in channels: updated = False - print('Updating channel', channel.display_name) + print("Updating channel", channel.display_name) # This field is immutable and it is illegal to specify a # non-default value (UNVERIFIED or VERIFIED) in the # Create() or Update() operations. - channel.verification_status = monitoring_v3.enums.NotificationChannel.\ - VerificationStatus.VERIFICATION_STATUS_UNSPECIFIED + channel.verification_status = ( + monitoring_v3.NotificationChannel.VerificationStatus.VERIFICATION_STATUS_UNSPECIFIED + ) if is_same_project: try: - channel_client.update_notification_channel(channel) + channel_client.update_notification_channel(notification_channel=channel) updated = True except google.api_core.exceptions.NotFound: pass # The channel was deleted. Create it below. @@ -178,19 +212,20 @@ def restore(project_name, backup_filename): if not updated: # The channel no longer exists. Recreate it. old_name = channel.name - channel.ClearField("name") + del channel.name new_channel = channel_client.create_notification_channel( - project_name, channel) + name=project_name, notification_channel=channel + ) channel_name_map[old_name] = new_channel.name # Restore the alerts alert_client = monitoring_v3.AlertPolicyServiceClient() for policy in policies: - print('Updating policy', policy.display_name) + print("Updating policy", policy.display_name) # These two fields cannot be set directly, so clear them. - policy.ClearField('creation_record') - policy.ClearField('mutation_record') + del policy.creation_record + del policy.mutation_record # Update old channel names with new channel names. 
for i, channel in enumerate(policy.notification_channels): @@ -202,7 +237,7 @@ def restore(project_name, backup_filename): if is_same_project: try: - alert_client.update_alert_policy(policy) + alert_client.update_alert_policy(alert_policy=policy) updated = True except google.api_core.exceptions.NotFound: pass # The policy was deleted. Create it below. @@ -214,11 +249,15 @@ def restore(project_name, backup_filename): if not updated: # The policy no longer exists. Recreate it. old_name = policy.name - policy.ClearField("name") + del policy.name for condition in policy.conditions: - condition.ClearField("name") - policy = alert_client.create_alert_policy(project_name, policy) - print('Updated', policy.name) + del condition.name + policy = alert_client.create_alert_policy( + name=project_name, alert_policy=policy + ) + print("Updated", policy.name) + + # [END monitoring_alert_enable_channel] # [END monitoring_alert_restore_policies] # [END monitoring_alert_create_policy] @@ -239,105 +278,87 @@ def project_id(): Returns: str -- the project name """ - project_id = os.environ['GOOGLE_CLOUD_PROJECT'] + project_id = os.environ["GOOGLE_CLOUD_PROJECT"] if not project_id: raise MissingProjectIdError( - 'Set the environment variable ' + - 'GCLOUD_PROJECT to your Google Cloud Project Id.') + "Set the environment variable " + + "GCLOUD_PROJECT to your Google Cloud Project Id." + ) return project_id def project_name(): - return 'projects/' + project_id() + return "projects/" + project_id() -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description='Demonstrates AlertPolicy API operations.') + description="Demonstrates AlertPolicy API operations." 
+ ) - subparsers = parser.add_subparsers(dest='command') + subparsers = parser.add_subparsers(dest="command") list_alert_policies_parser = subparsers.add_parser( - 'list-alert-policies', - help=list_alert_policies.__doc__ + "list-alert-policies", help=list_alert_policies.__doc__ ) list_notification_channels_parser = subparsers.add_parser( - 'list-notification-channels', - help=list_alert_policies.__doc__ + "list-notification-channels", help=list_alert_policies.__doc__ ) enable_alert_policies_parser = subparsers.add_parser( - 'enable-alert-policies', - help=enable_alert_policies.__doc__ + "enable-alert-policies", help=enable_alert_policies.__doc__ ) enable_alert_policies_parser.add_argument( - '--filter', + "--filter", ) disable_alert_policies_parser = subparsers.add_parser( - 'disable-alert-policies', - help=enable_alert_policies.__doc__ + "disable-alert-policies", help=enable_alert_policies.__doc__ ) disable_alert_policies_parser.add_argument( - '--filter', + "--filter", ) replace_notification_channels_parser = subparsers.add_parser( - 'replace-notification-channels', - help=replace_notification_channels.__doc__ + "replace-notification-channels", help=replace_notification_channels.__doc__ ) replace_notification_channels_parser.add_argument( - '-p', '--alert_policy_id', - required=True + "-p", "--alert_policy_id", required=True ) replace_notification_channels_parser.add_argument( - '-c', '--notification_channel_id', - required=True, - action='append' + "-c", "--notification_channel_id", required=True, action="append" ) - backup_parser = subparsers.add_parser( - 'backup', - help=backup.__doc__ - ) - backup_parser.add_argument( - '--backup_to_filename', - required=True - ) + backup_parser = subparsers.add_parser("backup", help=backup.__doc__) + backup_parser.add_argument("--backup_to_filename", required=True) - restore_parser = subparsers.add_parser( - 'restore', - help=restore.__doc__ - ) - restore_parser.add_argument( - '--restore_from_filename', - required=True - 
) + restore_parser = subparsers.add_parser("restore", help=restore.__doc__) + restore_parser.add_argument("--restore_from_filename", required=True) args = parser.parse_args() - if args.command == 'list-alert-policies': + if args.command == "list-alert-policies": list_alert_policies(project_name()) - elif args.command == 'list-notification-channels': + elif args.command == "list-notification-channels": list_notification_channels(project_name()) - elif args.command == 'enable-alert-policies': + elif args.command == "enable-alert-policies": enable_alert_policies(project_name(), enable=True, filter_=args.filter) - elif args.command == 'disable-alert-policies': - enable_alert_policies(project_name(), enable=False, - filter_=args.filter) + elif args.command == "disable-alert-policies": + enable_alert_policies(project_name(), enable=False, filter_=args.filter) - elif args.command == 'replace-notification-channels': - replace_notification_channels(project_name(), args.alert_policy_id, - args.notification_channel_id) + elif args.command == "replace-notification-channels": + replace_notification_channels( + project_name(), args.alert_policy_id, args.notification_channel_id + ) - elif args.command == 'backup': + elif args.command == "backup": backup(project_name(), args.backup_to_filename) - elif args.command == 'restore': + elif args.command == "restore": restore(project_name(), args.restore_from_filename) diff --git a/samples/snippets/v3/alerts-client/snippets_test.py b/samples/snippets/v3/alerts-client/snippets_test.py index 550a8dc9..ca90a170 100644 --- a/samples/snippets/v3/alerts-client/snippets_test.py +++ b/samples/snippets/v3/alerts-client/snippets_test.py @@ -23,7 +23,6 @@ from google.api_core.exceptions import NotFound from google.api_core.exceptions import ServiceUnavailable from google.cloud import monitoring_v3 -import google.protobuf.json_format import pytest from retrying import retry @@ -35,13 +34,11 @@ def random_name(length): - return ''.join( - 
[random.choice(string.ascii_lowercase) for i in range(length)]) + return "".join([random.choice(string.ascii_lowercase) for i in range(length)]) def retry_on_exceptions(exception): - return isinstance( - exception, (Aborted, ServiceUnavailable, DeadlineExceeded)) + return isinstance(exception, (Aborted, ServiceUnavailable, DeadlineExceeded)) def delay_on_aborted(err, *args): @@ -54,7 +51,7 @@ def delay_on_aborted(err, *args): class PochanFixture: """A test fixture that creates an alert POlicy and a notification CHANnel, - hence the name, pochan. + hence the name, pochan. """ def __init__(self): @@ -62,55 +59,64 @@ def __init__(self): self.project_name = snippets.project_name() self.alert_policy_client = monitoring_v3.AlertPolicyServiceClient() self.notification_channel_client = ( - monitoring_v3.NotificationChannelServiceClient()) + monitoring_v3.NotificationChannelServiceClient() + ) def __enter__(self): - @retry(wait_exponential_multiplier=1000, wait_exponential_max=10000, - stop_max_attempt_number=10, - retry_on_exception=retry_on_exceptions) + @retry( + wait_exponential_multiplier=1000, + wait_exponential_max=10000, + stop_max_attempt_number=10, + retry_on_exception=retry_on_exceptions, + ) def setup(): # Create a policy. - policy = monitoring_v3.types.alert_pb2.AlertPolicy() - json = open('test_alert_policy.json').read() - google.protobuf.json_format.Parse(json, policy) - policy.display_name = 'snippets-test-' + random_name(10) + json = open("test_alert_policy.json").read() + policy = monitoring_v3.AlertPolicy.from_json(json) + policy.display_name = "snippets-test-" + random_name(10) self.alert_policy = self.alert_policy_client.create_alert_policy( - self.project_name, policy) + name=self.project_name, alert_policy=policy + ) # Create a notification channel. 
- notification_channel = ( - monitoring_v3.types.notification_pb2.NotificationChannel()) - json = open('test_notification_channel.json').read() - google.protobuf.json_format.Parse(json, notification_channel) - notification_channel.display_name = ( - 'snippets-test-' + random_name(10)) + json = open("test_notification_channel.json").read() + notification_channel = monitoring_v3.NotificationChannel.from_json(json) + notification_channel.display_name = "snippets-test-" + random_name(10) self.notification_channel = ( self.notification_channel_client.create_notification_channel( - self.project_name, notification_channel)) + name=self.project_name, notification_channel=notification_channel + ) + ) + setup() return self def __exit__(self, type, value, traceback): # Delete the policy and channel we created. - @retry(wait_exponential_multiplier=1000, wait_exponential_max=10000, - stop_max_attempt_number=10, - retry_on_exception=retry_on_exceptions) + @retry( + wait_exponential_multiplier=1000, + wait_exponential_max=10000, + stop_max_attempt_number=10, + retry_on_exception=retry_on_exceptions, + ) def teardown(): try: self.alert_policy_client.delete_alert_policy( - self.alert_policy.name) + name=self.alert_policy.name + ) except NotFound: print("Ignored NotFound when deleting a policy.") try: if self.notification_channel.name: - self.notification_channel_client\ - .delete_notification_channel( - self.notification_channel.name) + self.notification_channel_client.delete_notification_channel( + self.notification_channel.name + ) except NotFound: print("Ignored NotFound when deleting a channel.") + teardown() -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def pochan(): with PochanFixture() as pochan: yield pochan @@ -132,20 +138,24 @@ def test_enable_alert_policies(capsys, pochan): time.sleep(2) snippets.enable_alert_policies(pochan.project_name, True) out, _ = capsys.readouterr() - assert "Enabled {0}".format(pochan.project_name) in out \ + assert ( + 
"Enabled {0}".format(pochan.project_name) in out or "{} is already enabled".format(pochan.alert_policy.name) in out + ) time.sleep(2) snippets.enable_alert_policies(pochan.project_name, False) out, _ = capsys.readouterr() - assert "Disabled {}".format(pochan.project_name) in out \ + assert ( + "Disabled {}".format(pochan.project_name) in out or "{} is already disabled".format(pochan.alert_policy.name) in out + ) @pytest.mark.flaky(rerun_filter=delay_on_aborted, max_runs=5) def test_replace_channels(capsys, pochan): - alert_policy_id = pochan.alert_policy.name.split('/')[-1] - notification_channel_id = pochan.notification_channel.name.split('/')[-1] + alert_policy_id = pochan.alert_policy.name.split("/")[-1] + notification_channel_id = pochan.notification_channel.name.split("/")[-1] # This sleep call is for mitigating the following error: # "409 Too many concurrent edits to the project configuration. @@ -154,7 +164,8 @@ def test_replace_channels(capsys, pochan): # See also #3310 time.sleep(2) snippets.replace_notification_channels( - pochan.project_name, alert_policy_id, [notification_channel_id]) + pochan.project_name, alert_policy_id, [notification_channel_id] + ) out, _ = capsys.readouterr() assert "Updated {0}".format(pochan.alert_policy.name) in out @@ -167,20 +178,21 @@ def test_backup_and_restore(capsys, pochan): # Having multiple projects will void this `sleep()` call. 
# See also #3310 time.sleep(2) - snippets.backup(pochan.project_name, 'backup.json') + snippets.backup(pochan.project_name, "backup.json") out, _ = capsys.readouterr() time.sleep(2) - snippets.restore(pochan.project_name, 'backup.json') + snippets.restore(pochan.project_name, "backup.json") out, _ = capsys.readouterr() assert "Updated {0}".format(pochan.alert_policy.name) in out - assert "Updating channel {0}".format( - pochan.notification_channel.display_name) in out + assert ( + "Updating channel {0}".format(pochan.notification_channel.display_name) in out + ) @pytest.mark.flaky(rerun_filter=delay_on_aborted, max_runs=5) def test_delete_channels(capsys, pochan): - notification_channel_id = pochan.notification_channel.name.split('/')[-1] + notification_channel_id = pochan.notification_channel.name.split("/")[-1] # This sleep call is for mitigating the following error: # "409 Too many concurrent edits to the project configuration. @@ -189,7 +201,8 @@ def test_delete_channels(capsys, pochan): # See also #3310 time.sleep(2) snippets.delete_notification_channels( - pochan.project_name, [notification_channel_id], force=True) + pochan.project_name, [notification_channel_id], force=True + ) out, _ = capsys.readouterr() assert "{0} deleted".format(notification_channel_id) in out - pochan.notification_channel.name = '' # So teardown is not tried + pochan.notification_channel.name = "" # So teardown is not tried diff --git a/samples/snippets/v3/cloud-client/quickstart.py b/samples/snippets/v3/cloud-client/quickstart.py index 0527acae..78088dba 100644 --- a/samples/snippets/v3/cloud-client/quickstart.py +++ b/samples/snippets/v3/cloud-client/quickstart.py @@ -13,31 +13,33 @@ # limitations under the License. -def run_quickstart(): +def run_quickstart(project=""): # [START monitoring_quickstart] from google.cloud import monitoring_v3 import time client = monitoring_v3.MetricServiceClient() - project = 'my-project' # TODO: Update to your project ID. 
- project_name = client.project_path(project) + # project = 'my-project' # TODO: Update to your project ID. + project_name = f"projects/{project}" - series = monitoring_v3.types.TimeSeries() - series.metric.type = 'custom.googleapis.com/my_metric' - series.resource.type = 'gce_instance' - series.resource.labels['instance_id'] = '1234567890123456789' - series.resource.labels['zone'] = 'us-central1-f' - point = series.points.add() - point.value.double_value = 3.14 + series = monitoring_v3.TimeSeries() + series.metric.type = "custom.googleapis.com/my_metric" + series.resource.type = "gce_instance" + series.resource.labels["instance_id"] = "1234567890123456789" + series.resource.labels["zone"] = "us-central1-f" now = time.time() - point.interval.end_time.seconds = int(now) - point.interval.end_time.nanos = int( - (now - point.interval.end_time.seconds) * 10**9) - client.create_time_series(project_name, [series]) - print('Successfully wrote time series.') + seconds = int(now) + nanos = int((now - seconds) * 10 ** 9) + interval = monitoring_v3.TimeInterval( + {"end_time": {"seconds": seconds, "nanos": nanos}} + ) + point = monitoring_v3.Point({"interval": interval, "value": {"double_value": 3.14}}) + series.points = [point] + client.create_time_series(request={"name": project_name, "time_series": [series]}) + print("Successfully wrote time series.") # [END monitoring_quickstart] -if __name__ == '__main__': +if __name__ == "__main__": run_quickstart() diff --git a/samples/snippets/v3/cloud-client/quickstart_test.py b/samples/snippets/v3/cloud-client/quickstart_test.py index fd0191aa..d7826e92 100644 --- a/samples/snippets/v3/cloud-client/quickstart_test.py +++ b/samples/snippets/v3/cloud-client/quickstart_test.py @@ -15,32 +15,18 @@ import os import backoff -import mock -import pytest import quickstart -PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] -@pytest.fixture -def mock_project_path(): - """Mock out project and replace 
with project from environment.""" - project_patch = mock.patch( - 'google.cloud.monitoring_v3.MetricServiceClient.' - 'project_path') - - with project_patch as project_mock: - project_mock.return_value = 'projects/{}'.format(PROJECT) - yield project_mock - - -def test_quickstart(capsys, mock_project_path): +def test_quickstart(capsys): @backoff.on_exception(backoff.expo, AssertionError, max_time=60) def eventually_consistent_test(): - quickstart.run_quickstart() + quickstart.run_quickstart(PROJECT) out, _ = capsys.readouterr() - assert 'wrote' in out + assert "wrote" in out eventually_consistent_test() diff --git a/samples/snippets/v3/cloud-client/snippets.py b/samples/snippets/v3/cloud-client/snippets.py index 64b3853f..1c0407a2 100644 --- a/samples/snippets/v3/cloud-client/snippets.py +++ b/samples/snippets/v3/cloud-client/snippets.py @@ -18,72 +18,83 @@ import time import uuid +from google.api import metric_pb2 as ga_metric from google.cloud import monitoring_v3 -PROJECT_ID = os.environ['GOOGLE_CLOUD_PROJECT'] +PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"] def create_metric_descriptor(project_id): # [START monitoring_create_metric] client = monitoring_v3.MetricServiceClient() - project_name = client.project_path(project_id) - descriptor = monitoring_v3.types.MetricDescriptor() - descriptor.type = 'custom.googleapis.com/my_metric' + str(uuid.uuid4()) - descriptor.metric_kind = ( - monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE) - descriptor.value_type = ( - monitoring_v3.enums.MetricDescriptor.ValueType.DOUBLE) - descriptor.description = 'This is a simple example of a custom metric.' 
- descriptor = client.create_metric_descriptor(project_name, descriptor) - print('Created {}.'.format(descriptor.name)) + project_name = f"projects/{project_id}" + descriptor = ga_metric.MetricDescriptor() + descriptor.type = "custom.googleapis.com/my_metric" + str(uuid.uuid4()) + descriptor.metric_kind = ga_metric.MetricDescriptor.MetricKind.GAUGE + descriptor.value_type = ga_metric.MetricDescriptor.ValueType.DOUBLE + descriptor.description = "This is a simple example of a custom metric." + descriptor = client.create_metric_descriptor( + name=project_name, metric_descriptor=descriptor + ) + print("Created {}.".format(descriptor.name)) # [END monitoring_create_metric] def delete_metric_descriptor(descriptor_name): # [START monitoring_delete_metric] client = monitoring_v3.MetricServiceClient() - client.delete_metric_descriptor(descriptor_name) - print('Deleted metric descriptor {}.'.format(descriptor_name)) + client.delete_metric_descriptor(name=descriptor_name) + print("Deleted metric descriptor {}.".format(descriptor_name)) # [END monitoring_delete_metric] def write_time_series(project_id): # [START monitoring_write_timeseries] client = monitoring_v3.MetricServiceClient() - project_name = client.project_path(project_id) - - series = monitoring_v3.types.TimeSeries() - series.metric.type = 'custom.googleapis.com/my_metric' + str(uuid.uuid4()) - series.resource.type = 'gce_instance' - series.resource.labels['instance_id'] = '1234567890123456789' - series.resource.labels['zone'] = 'us-central1-f' - point = series.points.add() - point.value.double_value = 3.14 + project_name = f"projects/{project_id}" + + series = monitoring_v3.TimeSeries() + series.metric.type = "custom.googleapis.com/my_metric" + str(uuid.uuid4()) + series.resource.type = "gce_instance" + series.resource.labels["instance_id"] = "1234567890123456789" + series.resource.labels["zone"] = "us-central1-f" now = time.time() - point.interval.end_time.seconds = int(now) - point.interval.end_time.nanos = int( 
- (now - point.interval.end_time.seconds) * 10**9) - client.create_time_series(project_name, [series]) + seconds = int(now) + nanos = int((now - seconds) * 10 ** 9) + interval = monitoring_v3.TimeInterval( + {"end_time": {"seconds": seconds, "nanos": nanos}} + ) + point = monitoring_v3.Point({"interval": interval, "value": {"double_value": 3.14}}) + series.points = [point] + client.create_time_series(name=project_name, time_series=[series]) # [END monitoring_write_timeseries] def list_time_series(project_id): # [START monitoring_read_timeseries_simple] client = monitoring_v3.MetricServiceClient() - project_name = client.project_path(project_id) - interval = monitoring_v3.types.TimeInterval() + project_name = f"projects/{project_id}" + interval = monitoring_v3.TimeInterval() + now = time.time() - interval.end_time.seconds = int(now) - interval.end_time.nanos = int( - (now - interval.end_time.seconds) * 10**9) - interval.start_time.seconds = int(now - 1200) - interval.start_time.nanos = interval.end_time.nanos + seconds = int(now) + nanos = int((now - seconds) * 10 ** 9) + interval = monitoring_v3.TimeInterval( + { + "end_time": {"seconds": seconds, "nanos": nanos}, + "start_time": {"seconds": (seconds - 1200), "nanos": nanos}, + } + ) + results = client.list_time_series( - project_name, - 'metric.type = "compute.googleapis.com/instance/cpu/utilization"', - interval, - monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL) + request={ + "name": project_name, + "filter": 'metric.type = "compute.googleapis.com/instance/cpu/utilization"', + "interval": interval, + "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL, + } + ) for result in results: print(result) # [END monitoring_read_timeseries_simple] @@ -92,19 +103,24 @@ def list_time_series(project_id): def list_time_series_header(project_id): # [START monitoring_read_timeseries_fields] client = monitoring_v3.MetricServiceClient() - project_name = client.project_path(project_id) - interval = 
monitoring_v3.types.TimeInterval() + project_name = f"projects/{project_id}" now = time.time() - interval.end_time.seconds = int(now) - interval.end_time.nanos = int( - (now - interval.end_time.seconds) * 10**9) - interval.start_time.seconds = int(now - 1200) - interval.start_time.nanos = interval.end_time.nanos + seconds = int(now) + nanos = int((now - seconds) * 10 ** 9) + interval = monitoring_v3.TimeInterval( + { + "end_time": {"seconds": seconds, "nanos": nanos}, + "start_time": {"seconds": (seconds - 1200), "nanos": nanos}, + } + ) results = client.list_time_series( - project_name, - 'metric.type = "compute.googleapis.com/instance/cpu/utilization"', - interval, - monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.HEADERS) + request={ + "name": project_name, + "filter": 'metric.type = "compute.googleapis.com/instance/cpu/utilization"', + "interval": interval, + "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.HEADERS, + } + ) for result in results: print(result) # [END monitoring_read_timeseries_fields] @@ -113,25 +129,33 @@ def list_time_series_header(project_id): def list_time_series_aggregate(project_id): # [START monitoring_read_timeseries_align] client = monitoring_v3.MetricServiceClient() - project_name = client.project_path(project_id) - interval = monitoring_v3.types.TimeInterval() + project_name = f"projects/{project_id}" + now = time.time() - interval.end_time.seconds = int(now) - interval.end_time.nanos = int( - (now - interval.end_time.seconds) * 10**9) - interval.start_time.seconds = int(now - 3600) - interval.start_time.nanos = interval.end_time.nanos - aggregation = monitoring_v3.types.Aggregation() - aggregation.alignment_period.seconds = 1200 # 20 minutes - aggregation.per_series_aligner = ( - monitoring_v3.enums.Aggregation.Aligner.ALIGN_MEAN) + seconds = int(now) + nanos = int((now - seconds) * 10 ** 9) + interval = monitoring_v3.TimeInterval( + { + "end_time": {"seconds": seconds, "nanos": nanos}, + "start_time": {"seconds": 
(seconds - 3600), "nanos": nanos}, + } + ) + aggregation = monitoring_v3.Aggregation( + { + "alignment_period": {"seconds": 1200}, # 20 minutes + "per_series_aligner": monitoring_v3.Aggregation.Aligner.ALIGN_MEAN, + } + ) results = client.list_time_series( - project_name, - 'metric.type = "compute.googleapis.com/instance/cpu/utilization"', - interval, - monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL, - aggregation) + request={ + "name": project_name, + "filter": 'metric.type = "compute.googleapis.com/instance/cpu/utilization"', + "interval": interval, + "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL, + "aggregation": aggregation, + } + ) for result in results: print(result) # [END monitoring_read_timeseries_align] @@ -140,28 +164,35 @@ def list_time_series_aggregate(project_id): def list_time_series_reduce(project_id): # [START monitoring_read_timeseries_reduce] client = monitoring_v3.MetricServiceClient() - project_name = client.project_path(project_id) - interval = monitoring_v3.types.TimeInterval() + project_name = f"projects/{project_id}" + now = time.time() - interval.end_time.seconds = int(now) - interval.end_time.nanos = int( - (now - interval.end_time.seconds) * 10**9) - interval.start_time.seconds = int(now - 3600) - interval.start_time.nanos = interval.end_time.nanos - aggregation = monitoring_v3.types.Aggregation() - aggregation.alignment_period.seconds = 1200 # 20 minutes - aggregation.per_series_aligner = ( - monitoring_v3.enums.Aggregation.Aligner.ALIGN_MEAN) - aggregation.cross_series_reducer = ( - monitoring_v3.enums.Aggregation.Reducer.REDUCE_MEAN) - aggregation.group_by_fields.append('resource.zone') + seconds = int(now) + nanos = int((now - seconds) * 10 ** 9) + interval = monitoring_v3.TimeInterval( + { + "end_time": {"seconds": seconds, "nanos": nanos}, + "start_time": {"seconds": (seconds - 3600), "nanos": nanos}, + } + ) + aggregation = monitoring_v3.Aggregation( + { + "alignment_period": {"seconds": 1200}, # 
20 minutes + "per_series_aligner": monitoring_v3.Aggregation.Aligner.ALIGN_MEAN, + "cross_series_reducer": monitoring_v3.Aggregation.Reducer.REDUCE_MEAN, + "group_by_fields": ["resource.zone"], + } + ) results = client.list_time_series( - project_name, - 'metric.type = "compute.googleapis.com/instance/cpu/utilization"', - interval, - monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL, - aggregation) + request={ + "name": project_name, + "filter": 'metric.type = "compute.googleapis.com/instance/cpu/utilization"', + "interval": interval, + "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL, + "aggregation": aggregation, + } + ) for result in results: print(result) # [END monitoring_read_timeseries_reduce] @@ -170,8 +201,8 @@ def list_time_series_reduce(project_id): def list_metric_descriptors(project_id): # [START monitoring_list_descriptors] client = monitoring_v3.MetricServiceClient() - project_name = client.project_path(project_id) - for descriptor in client.list_metric_descriptors(project_name): + project_name = f"projects/{project_id}" + for descriptor in client.list_metric_descriptors(name=project_name): print(descriptor.type) # [END monitoring_list_descriptors] @@ -179,9 +210,8 @@ def list_metric_descriptors(project_id): def list_monitored_resources(project_id): # [START monitoring_list_resources] client = monitoring_v3.MetricServiceClient() - project_name = client.project_path(project_id) - resource_descriptors = ( - client.list_monitored_resource_descriptors(project_name)) + project_name = f"projects/{project_id}" + resource_descriptors = client.list_monitored_resource_descriptors(name=project_name) for descriptor in resource_descriptors: print(descriptor.type) # [END monitoring_list_resources] @@ -190,121 +220,109 @@ def list_monitored_resources(project_id): def get_monitored_resource_descriptor(project_id, resource_type_name): # [START monitoring_get_resource] client = monitoring_v3.MetricServiceClient() - resource_path = 
client.monitored_resource_descriptor_path( - project_id, resource_type_name) - pprint.pprint(client.get_monitored_resource_descriptor(resource_path)) + resource_path = ( + f"projects/{project_id}/monitoredResourceDescriptors/{resource_type_name}" + ) + pprint.pprint(client.get_monitored_resource_descriptor(name=resource_path)) # [END monitoring_get_resource] def get_metric_descriptor(metric_name): # [START monitoring_get_descriptor] client = monitoring_v3.MetricServiceClient() - descriptor = client.get_metric_descriptor(metric_name) + descriptor = client.get_metric_descriptor(name=metric_name) pprint.pprint(descriptor) # [END monitoring_get_descriptor] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description='Demonstrates Monitoring API operations.') + description="Demonstrates Monitoring API operations." + ) - subparsers = parser.add_subparsers(dest='command') + subparsers = parser.add_subparsers(dest="command") create_metric_descriptor_parser = subparsers.add_parser( - 'create-metric-descriptor', - help=create_metric_descriptor.__doc__ + "create-metric-descriptor", help=create_metric_descriptor.__doc__ ) list_metric_descriptor_parser = subparsers.add_parser( - 'list-metric-descriptors', - help=list_metric_descriptors.__doc__ + "list-metric-descriptors", help=list_metric_descriptors.__doc__ ) get_metric_descriptor_parser = subparsers.add_parser( - 'get-metric-descriptor', - help=get_metric_descriptor.__doc__ + "get-metric-descriptor", help=get_metric_descriptor.__doc__ ) get_metric_descriptor_parser.add_argument( - '--metric-type-name', - help='The metric type of the metric descriptor to see details about.', - required=True + "--metric-type-name", + help="The metric type of the metric descriptor to see details about.", + required=True, ) delete_metric_descriptor_parser = subparsers.add_parser( - 'delete-metric-descriptor', - help=list_metric_descriptors.__doc__ + "delete-metric-descriptor", 
help=list_metric_descriptors.__doc__ ) delete_metric_descriptor_parser.add_argument( - '--metric-descriptor-name', - help='Metric descriptor to delete', - required=True + "--metric-descriptor-name", help="Metric descriptor to delete", required=True ) list_resources_parser = subparsers.add_parser( - 'list-resources', - help=list_monitored_resources.__doc__ + "list-resources", help=list_monitored_resources.__doc__ ) get_resource_parser = subparsers.add_parser( - 'get-resource', - help=get_monitored_resource_descriptor.__doc__ + "get-resource", help=get_monitored_resource_descriptor.__doc__ ) get_resource_parser.add_argument( - '--resource-type-name', - help='Monitored resource to view more information about.', - required=True + "--resource-type-name", + help="Monitored resource to view more information about.", + required=True, ) write_time_series_parser = subparsers.add_parser( - 'write-time-series', - help=write_time_series.__doc__ + "write-time-series", help=write_time_series.__doc__ ) list_time_series_parser = subparsers.add_parser( - 'list-time-series', - help=list_time_series.__doc__ + "list-time-series", help=list_time_series.__doc__ ) list_time_series_header_parser = subparsers.add_parser( - 'list-time-series-header', - help=list_time_series_header.__doc__ + "list-time-series-header", help=list_time_series_header.__doc__ ) read_time_series_reduce = subparsers.add_parser( - 'list-time-series-reduce', - help=list_time_series_reduce.__doc__ + "list-time-series-reduce", help=list_time_series_reduce.__doc__ ) read_time_series_aggregate = subparsers.add_parser( - 'list-time-series-aggregate', - help=list_time_series_aggregate.__doc__ + "list-time-series-aggregate", help=list_time_series_aggregate.__doc__ ) args = parser.parse_args() - if args.command == 'create-metric-descriptor': + if args.command == "create-metric-descriptor": create_metric_descriptor(PROJECT_ID) - if args.command == 'list-metric-descriptors': + if args.command == "list-metric-descriptors": 
list_metric_descriptors(PROJECT_ID) - if args.command == 'get-metric-descriptor': + if args.command == "get-metric-descriptor": get_metric_descriptor(args.metric_type_name) - if args.command == 'delete-metric-descriptor': + if args.command == "delete-metric-descriptor": delete_metric_descriptor(args.metric_descriptor_name) - if args.command == 'list-resources': + if args.command == "list-resources": list_monitored_resources(PROJECT_ID) - if args.command == 'get-resource': - get_monitored_resource_descriptor( - PROJECT_ID, args.resource_type_name) - if args.command == 'write-time-series': + if args.command == "get-resource": + get_monitored_resource_descriptor(PROJECT_ID, args.resource_type_name) + if args.command == "write-time-series": write_time_series(PROJECT_ID) - if args.command == 'list-time-series': + if args.command == "list-time-series": list_time_series(PROJECT_ID) - if args.command == 'list-time-series-header': + if args.command == "list-time-series-header": list_time_series_header(PROJECT_ID) - if args.command == 'list-time-series-reduce': + if args.command == "list-time-series-reduce": list_time_series_reduce(PROJECT_ID) - if args.command == 'list-time-series-aggregate': + if args.command == "list-time-series-aggregate": list_time_series_aggregate(PROJECT_ID) diff --git a/samples/snippets/v3/cloud-client/snippets_test.py b/samples/snippets/v3/cloud-client/snippets_test.py index 5aabbda8..d6c7d07a 100644 --- a/samples/snippets/v3/cloud-client/snippets_test.py +++ b/samples/snippets/v3/cloud-client/snippets_test.py @@ -23,14 +23,14 @@ import snippets -PROJECT_ID = os.environ['GOOGLE_CLOUD_PROJECT'] +PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"] @pytest.fixture(scope="function") def custom_metric_descriptor(capsys): snippets.create_metric_descriptor(PROJECT_ID) out, _ = capsys.readouterr() - match = re.search(r'Created (.*)\.', out) + match = re.search(r"Created (.*)\.", out) metric_name = match.group(1) yield metric_name @@ -43,7 +43,6 @@ def 
custom_metric_descriptor(capsys): @pytest.fixture(scope="module") def write_time_series(): - @backoff.on_exception(backoff.expo, InternalServerError, max_time=120) def write(): snippets.write_time_series(PROJECT_ID) @@ -54,64 +53,63 @@ def write(): def test_get_delete_metric_descriptor(capsys, custom_metric_descriptor): try: - @backoff.on_exception( - backoff.expo, (AssertionError, NotFound), max_time=60) + + @backoff.on_exception(backoff.expo, (AssertionError, NotFound), max_time=60) def eventually_consistent_test(): snippets.get_metric_descriptor(custom_metric_descriptor) out, _ = capsys.readouterr() - assert 'DOUBLE' in out + assert "DOUBLE" in out eventually_consistent_test() finally: snippets.delete_metric_descriptor(custom_metric_descriptor) out, _ = capsys.readouterr() - assert 'Deleted metric' in out + assert "Deleted metric" in out def test_list_metric_descriptors(capsys): snippets.list_metric_descriptors(PROJECT_ID) out, _ = capsys.readouterr() - assert 'logging.googleapis.com/byte_count' in out + assert "logging.googleapis.com/byte_count" in out def test_list_resources(capsys): snippets.list_monitored_resources(PROJECT_ID) out, _ = capsys.readouterr() - assert 'pubsub_topic' in out + assert "pubsub_topic" in out def test_get_resources(capsys): - snippets.get_monitored_resource_descriptor( - PROJECT_ID, 'pubsub_topic') + snippets.get_monitored_resource_descriptor(PROJECT_ID, "pubsub_topic") out, _ = capsys.readouterr() - assert 'A topic in Google Cloud Pub/Sub' in out + assert "A topic in Google Cloud Pub/Sub" in out def test_list_time_series(capsys, write_time_series): snippets.list_time_series(PROJECT_ID) out, _ = capsys.readouterr() - assert 'gce_instance' in out + assert "gce_instance" in out def test_list_time_series_header(capsys, write_time_series): snippets.list_time_series_header(PROJECT_ID) out, _ = capsys.readouterr() - assert 'gce_instance' in out + assert "gce_instance" in out def test_list_time_series_aggregate(capsys, write_time_series): 
snippets.list_time_series_aggregate(PROJECT_ID) out, _ = capsys.readouterr() - assert 'points' in out - assert 'interval' in out - assert 'start_time' in out - assert 'end_time' in out + assert "points" in out + assert "interval" in out + assert "start_time" in out + assert "end_time" in out def test_list_time_series_reduce(capsys, write_time_series): snippets.list_time_series_reduce(PROJECT_ID) out, _ = capsys.readouterr() - assert 'points' in out - assert 'interval' in out - assert 'start_time' in out - assert 'end_time' in out + assert "points" in out + assert "interval" in out + assert "start_time" in out + assert "end_time" in out diff --git a/samples/snippets/v3/uptime-check-client/snippets.py b/samples/snippets/v3/uptime-check-client/snippets.py index dcde3b58..0970b8e7 100644 --- a/samples/snippets/v3/uptime-check-client/snippets.py +++ b/samples/snippets/v3/uptime-check-client/snippets.py @@ -19,48 +19,51 @@ import pprint from google.cloud import monitoring_v3 +from google.protobuf import field_mask_pb2 import tabulate # [START monitoring_uptime_check_create] def create_uptime_check_config_get(project_name, host_name=None, display_name=None): - config = monitoring_v3.types.uptime_pb2.UptimeCheckConfig() + config = monitoring_v3.UptimeCheckConfig() config.display_name = display_name or "New GET uptime check" - config.monitored_resource.type = "uptime_url" - config.monitored_resource.labels.update({"host": host_name or "example.com"}) - config.http_check.request_method = ( - monitoring_v3.enums.UptimeCheckConfig.HttpCheck.RequestMethod.GET - ) - config.http_check.path = "/" - config.http_check.port = 80 - config.timeout.seconds = 10 - config.period.seconds = 300 + config.monitored_resource = { + "type": "uptime_url", + "labels": {"host": host_name or "example.com"} + } + config.http_check = { + "request_method": monitoring_v3.UptimeCheckConfig.HttpCheck.RequestMethod.GET, + "path": "/", + "port": 80 + } + config.timeout = {"seconds": 10} + config.period = 
{"seconds": 300} client = monitoring_v3.UptimeCheckServiceClient() - new_config = client.create_uptime_check_config(project_name, config) + new_config = client.create_uptime_check_config(request={"parent": project_name, "uptime_check_config": config}) pprint.pprint(new_config) return new_config def create_uptime_check_config_post(project_name, host_name=None, display_name=None): - config = monitoring_v3.types.uptime_pb2.UptimeCheckConfig() + config = monitoring_v3.UptimeCheckConfig() config.display_name = display_name or "New POST uptime check" - config.monitored_resource.type = "uptime_url" - config.monitored_resource.labels.update({"host": host_name or "example.com"}) - config.http_check.request_method = ( - monitoring_v3.enums.UptimeCheckConfig.HttpCheck.RequestMethod.POST - ) - config.http_check.content_type = ( - monitoring_v3.enums.UptimeCheckConfig.HttpCheck.ContentType.URL_ENCODED - ) - config.http_check.body = "foo=bar".encode("utf-8") - config.http_check.path = "/" - config.http_check.port = 80 - config.timeout.seconds = 10 - config.period.seconds = 300 + config.monitored_resource = { + "type": "uptime_url", + "labels": {"host": host_name or "example.com"} + } + config.http_check = { + "request_method": monitoring_v3.UptimeCheckConfig.HttpCheck.RequestMethod.POST, + "content_type": monitoring_v3.UptimeCheckConfig.HttpCheck.ContentType.URL_ENCODED, + "body": "foo=bar".encode("utf-8"), + "path": "/", + "port": 80 + } + config.timeout = {"seconds": 10} + config.period = {"seconds": 300} client = monitoring_v3.UptimeCheckServiceClient() - new_config = client.create_uptime_check_config(project_name, config) + new_config = client.create_uptime_check_config(request={"parent": project_name, "uptime_check_config": config}) pprint.pprint(new_config) return new_config @@ -72,15 +75,15 @@ def update_uptime_check_config( config_name, new_display_name=None, new_http_check_path=None ): client = monitoring_v3.UptimeCheckServiceClient() - config = 
client.get_uptime_check_config(config_name) - field_mask = monitoring_v3.types.FieldMask() + config = client.get_uptime_check_config(request={"name": config_name}) + field_mask = field_mask_pb2.FieldMask() if new_display_name: field_mask.paths.append("display_name") config.display_name = new_display_name if new_http_check_path: field_mask.paths.append("http_check.path") config.http_check.path = new_http_check_path - client.update_uptime_check_config(config, field_mask) + client.update_uptime_check_config(request={"uptime_check_config": config, "update_mask": field_mask}) # [END monitoring_uptime_check_update] @@ -89,7 +92,7 @@ def update_uptime_check_config( # [START monitoring_uptime_check_list_configs] def list_uptime_check_configs(project_name): client = monitoring_v3.UptimeCheckServiceClient() - configs = client.list_uptime_check_configs(project_name) + configs = client.list_uptime_check_configs(request={"parent": project_name}) for config in configs: pprint.pprint(config) @@ -101,7 +104,7 @@ def list_uptime_check_configs(project_name): # [START monitoring_uptime_check_list_ips] def list_uptime_check_ips(): client = monitoring_v3.UptimeCheckServiceClient() - ips = client.list_uptime_check_ips() + ips = client.list_uptime_check_ips(request={}) print( tabulate.tabulate( [(ip.region, ip.location, ip.ip_address) for ip in ips], @@ -116,7 +119,7 @@ def list_uptime_check_ips(): # [START monitoring_uptime_check_get] def get_uptime_check_config(config_name): client = monitoring_v3.UptimeCheckServiceClient() - config = client.get_uptime_check_config(config_name) + config = client.get_uptime_check_config(request={"name": config_name}) pprint.pprint(config) @@ -128,7 +131,7 @@ def get_uptime_check_config(config_name): # See https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.uptimeCheckConfigs#UptimeCheckConfig. 
def delete_uptime_check_config(config_name): client = monitoring_v3.UptimeCheckServiceClient() - client.delete_uptime_check_config(config_name) + client.delete_uptime_check_config(request={"name": config_name}) print("Deleted ", config_name) diff --git a/scripts/fixup_monitoring_v3_keywords.py b/scripts/fixup_monitoring_v3_keywords.py new file mode 100644 index 00000000..b07ce8b3 --- /dev/null +++ b/scripts/fixup_monitoring_v3_keywords.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class monitoringCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'create_alert_policy': ('name', 'alert_policy', ), + 'create_group': ('name', 'group', 'validate_only', ), + 'create_metric_descriptor': ('name', 'metric_descriptor', ), + 'create_notification_channel': ('name', 'notification_channel', ), + 'create_service': ('parent', 'service', 'service_id', ), + 'create_service_level_objective': ('parent', 'service_level_objective', 'service_level_objective_id', ), + 'create_time_series': ('name', 'time_series', ), + 'create_uptime_check_config': ('parent', 'uptime_check_config', ), + 'delete_alert_policy': ('name', ), + 'delete_group': ('name', 'recursive', ), + 'delete_metric_descriptor': ('name', ), + 'delete_notification_channel': ('name', 'force', ), + 'delete_service': ('name', ), + 'delete_service_level_objective': ('name', ), + 'delete_uptime_check_config': ('name', ), + 'get_alert_policy': ('name', ), + 'get_group': ('name', ), + 'get_metric_descriptor': ('name', ), + 'get_monitored_resource_descriptor': ('name', ), + 'get_notification_channel': ('name', ), + 'get_notification_channel_descriptor': ('name', ), + 'get_notification_channel_verification_code': ('name', 'expire_time', ), + 'get_service': ('name', ), + 'get_service_level_objective': ('name', 'view', ), + 'get_uptime_check_config': ('name', ), + 'list_alert_policies': ('name', 'filter', 'order_by', 'page_size', 'page_token', ), + 
'list_group_members': ('name', 'page_size', 'page_token', 'filter', 'interval', ), + 'list_groups': ('name', 'children_of_group', 'ancestors_of_group', 'descendants_of_group', 'page_size', 'page_token', ), + 'list_metric_descriptors': ('name', 'filter', 'page_size', 'page_token', ), + 'list_monitored_resource_descriptors': ('name', 'filter', 'page_size', 'page_token', ), + 'list_notification_channel_descriptors': ('name', 'page_size', 'page_token', ), + 'list_notification_channels': ('name', 'filter', 'order_by', 'page_size', 'page_token', ), + 'list_service_level_objectives': ('parent', 'filter', 'page_size', 'page_token', 'view', ), + 'list_services': ('parent', 'filter', 'page_size', 'page_token', ), + 'list_time_series': ('name', 'filter', 'interval', 'view', 'aggregation', 'order_by', 'page_size', 'page_token', ), + 'list_uptime_check_configs': ('parent', 'page_size', 'page_token', ), + 'list_uptime_check_ips': ('page_size', 'page_token', ), + 'send_notification_channel_verification_code': ('name', ), + 'update_alert_policy': ('alert_policy', 'update_mask', ), + 'update_group': ('group', 'validate_only', ), + 'update_notification_channel': ('notification_channel', 'update_mask', ), + 'update_service': ('service', 'update_mask', ), + 'update_service_level_objective': ('service_level_objective', 'update_mask', ), + 'update_uptime_check_config': ('uptime_check_config', 'update_mask', ), + 'verify_notification_channel': ('name', 'code', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. 
+ args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=monitoringCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. 
+ with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the monitoring client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/setup.py b/setup.py index 13c30600..3effb150 100644 --- a/setup.py +++ b/setup.py @@ -28,7 +28,11 @@ # 'Development Status :: 4 - Beta' # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" -dependencies = 
["google-api-core[grpc] >= 1.14.0, < 2.0.0dev"] +dependencies = [ + "google-api-core[grpc] >= 1.22.2, < 2.0.0dev", + "proto-plus >= 1.4.0", + "libcst >= 0.2.5", +] extras = {"pandas": "pandas >= 0.17.1"} @@ -43,7 +47,9 @@ # Only include packages under the 'google' namespace. Do not include tests, # benchmarks, etc. packages = [ - package for package in setuptools.find_packages() if package.startswith("google") + package + for package in setuptools.PEP420PackageFinder.find() + if package.startswith("google") ] # Determine which namespaces are needed. @@ -66,12 +72,10 @@ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "Operating System :: OS Independent", "Topic :: Internet", ], @@ -80,7 +84,8 @@ namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, - python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", + python_requires=">=3.6", + scripts=["scripts/fixup_monitoring_v3_keywords.py"], include_package_data=True, zip_safe=False, ) diff --git a/synth.metadata b/synth.metadata index da924e97..3b6b973b 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,21 +4,21 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-monitoring.git", - "sha": "85a1420dbb23011681072e425a80e1130dac69e8" + "sha": "605ffe066f3154239405e729b7cbfb7ecd69917f" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "da29da32b3a988457b49ae290112b74f14b713cc" + "sha": "0c868d49b8e05bc1f299bc773df9eb4ef9ed96e9" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": 
"da29da32b3a988457b49ae290112b74f14b713cc" + "sha": "0c868d49b8e05bc1f299bc773df9eb4ef9ed96e9" } } ], diff --git a/synth.py b/synth.py index 3367fa8b..cb722a3a 100644 --- a/synth.py +++ b/synth.py @@ -34,96 +34,50 @@ version="v3", bazel_target="//google/monitoring/v3:monitoring-v3-py", include_protos=True, + proto_output_path="google/cloud/monitoring_v3/proto" ) # don't copy nox.py, setup.py, README.rst, docs/index.rst excludes = ["nox.py", "setup.py", "README.rst", "docs/index.rst"] s.move(v3_library, excludes=excludes) -# metadata in tests in none but should be empty list. -# https://github.com/googleapis/gapic-generator/issues/2014 -s.replace( - "google/cloud/*/gapic/*_client.py", - 'def .*\(([^\)]+)\n.*metadata=None\):\n\s+"""(.*\n)*?\s+"""\n', - "\g<0>" - " if metadata is None:\n" - " metadata = []\n" - " metadata = list(metadata)\n", -) - -# Issues exist where python files should defined the source encoding -# https://github.com/googleapis/gapic-generator/issues/2097 -files = ["google/cloud/monitoring_v3/proto/common_pb2.py"] -for f in files: - s.replace(f, r"(^.*$\n)*", r"# -*- coding: utf-8 -*-\n\g<0>") - -# GAPIC-Generator is mangling some docstrings -# Missing blank line after bulleted list -s.replace( - "google/cloud/monitoring_v3/gapic/alert_policy_service_client.py", - "then a new `\[CONDITION_ID\]` is created.\n", - "\g<0>\n", -) - -s.replace( - "google/cloud/monitoring_v3/gapic/alert_policy_service_client.py", - " ::\n\n", - "", -) - - -s.replace( - "google/cloud/**/service_pb2.py", - """is: :: projects/\[PROJECT_ID_OR_NUMBER\]/services/\[SERVICE_ - ID\]/serviceLevelObjectives/\[SLO_NAME\]""", - """is: :: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]""" -) - -s.replace( - "google/cloud/**/metric_service_pb2.py", - """:: resource\.type = - starts_with\("gce_"\) AND resource\.label:id""", - """:: - - resource.type = starts_with("gce_") AND resource.label:id""" -) - -s.replace( - "google/cloud/**/alert_pb2.py", 
- """:: projects/\[PROJECT_ID_ - OR_NUMBER\]/notificationChannels/\[CHANNEL_ID\]""", - """projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]""" -) - -# Deal with long lines due to long proto name -s.replace( - ["google/cloud/monitoring_v3/__init__.py"], - "from google.cloud.monitoring_v3.gapic import " - "notification_channel_service_client\n", - "from google.cloud.monitoring_v3.gapic import (\n" - " notification_channel_service_client as notification_client)\n", -) -s.replace( - ["google/cloud/monitoring_v3/__init__.py"], - "notification_channel_service_client.NotificationChannelServiceClient", - "notification_client.NotificationChannelServiceClient", -) - +# Synth hack due to googleapis and python-api-common-protos out of sync. +for pattern in [ + "monitored_resource_types=\['monitored_resource_types_value'\],", + "assert response.monitored_resource_types == \['monitored_resource_types_value'\]", + "launch_stage=launch_stage.LaunchStage.UNIMPLEMENTED,", + "assert response.launch_stage == launch_stage.LaunchStage.UNIMPLEMENTED", +]: + s.replace( + "tests/unit/gapic/monitoring_v3/test_*.py", + pattern, + "" + ) + +# Synth hack due to microgenerator uses "type_" while api-common-protos uses "type". 
+for file in ["test_uptime_check_service.py", "test_metric_service.py"]: + s.replace( + f"tests/unit/gapic/monitoring_v3/{file}", + "type_", + "type" + ) # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- -templated_files = common.py_library(cov_level=92, samples=True) -s.move(templated_files) +templated_files = common.py_library( + samples=True, # set to True only if there are samples + microgenerator=True, + cov_level=99 +) +s.move(templated_files, excludes=[".coveragerc"]) # microgenerator has a good .coveragerc file # ---------------------------------------------------------------------------- # Samples templates # ---------------------------------------------------------------------------- python.py_samples(skip_readmes=True) - -# TODO(busunkim): Use latest sphinx after microgenerator transition -s.replace("noxfile.py", """['"]sphinx['"]""", '"sphinx<3.0.0"') - +# Don't treat warnings as errors. 
+s.replace("noxfile.py", '[\"\']-W[\"\']', '# "-W"') s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/system/gapic/v3/test_system_metric_service_v3.py b/tests/system/gapic/v3/test_system_metric_service_v3.py index 245501f1..c268729d 100644 --- a/tests/system/gapic/v3/test_system_metric_service_v3.py +++ b/tests/system/gapic/v3/test_system_metric_service_v3.py @@ -18,7 +18,6 @@ import time from google.cloud import monitoring_v3 -from google.cloud.monitoring_v3.proto import metric_service_pb2 class TestSystemMetricService(object): @@ -26,5 +25,5 @@ def test_list_monitored_resource_descriptors(self): project_id = os.environ["PROJECT_ID"] client = monitoring_v3.MetricServiceClient() - name = client.project_path(project_id) - response = client.list_monitored_resource_descriptors(name) + name = f"projects/{project_id}" + response = client.list_monitored_resource_descriptors(name=name) diff --git a/tests/system/test_vpcsc_v3.py b/tests/system/test_vpcsc_v3.py index 61af0ac3..e2e1ac7d 100644 --- a/tests/system/test_vpcsc_v3.py +++ b/tests/system/test_vpcsc_v3.py @@ -24,7 +24,6 @@ from google.api_core import exceptions from google.cloud import monitoring_v3 -from google.cloud.monitoring_v3 import enums from test_utils.vpcsc_config import vpcsc_config @@ -38,12 +37,12 @@ def aps_client(): @pytest.fixture(scope="module") def name_inside(aps_client): - return aps_client.project_path(vpcsc_config.project_inside) + return f"projects/{vpcsc_config.project_inside}" @pytest.fixture(scope="module") def name_outside(aps_client): - return aps_client.project_path(vpcsc_config.project_outside) + return f"projects/{vpcsc_config.project_outside}" @pytest.fixture(scope="module") @@ -63,59 +62,67 @@ class TestCRUDAlertPolicies(object): @staticmethod def test_create_alert_policy_inside(aps_client, name_inside): with pytest.raises(exceptions.InvalidArgument): # no 
perms issue - aps_client.create_alert_policy(name_inside, {}) + aps_client.create_alert_policy( + request={"name": name_inside, "alert_policy": {}} + ) @staticmethod def test_create_alert_policy_outside(aps_client, name_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - aps_client.create_alert_policy(name_outside, {}) + aps_client.create_alert_policy( + request={"name": name_outside, "alert_policy": {}} + ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @staticmethod def test_list_alert_policies_inside(aps_client, name_inside): - list(aps_client.list_alert_policies(name_inside)) + list(aps_client.list_alert_policies(request={"name": name_inside})) @staticmethod def test_list_alert_policies_outside(aps_client, name_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - list(aps_client.list_alert_policies(name_outside)) + list(aps_client.list_alert_policies(request={"name": name_outside})) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @staticmethod def test_get_alert_policy_inside(aps_client, alert_policy_path_inside): with pytest.raises(exceptions.NotFound): # no perms issue - aps_client.get_alert_policy(alert_policy_path_inside) + aps_client.get_alert_policy(request={"name": alert_policy_path_inside}) @staticmethod def test_get_alert_policy_outside(aps_client, alert_policy_path_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - aps_client.get_alert_policy(alert_policy_path_outside) + aps_client.get_alert_policy(request={"name": alert_policy_path_outside}) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @staticmethod def test_update_alert_policy_inside(aps_client, alert_policy_path_inside): with pytest.raises(exceptions.InvalidArgument): # no perms issue - aps_client.update_alert_policy({"name": alert_policy_path_inside}) + aps_client.update_alert_policy( + request={"alert_policy": {"name": alert_policy_path_inside}} + ) @staticmethod def test_update_alert_policy_outside(aps_client, 
alert_policy_path_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - aps_client.update_alert_policy({"name": alert_policy_path_outside}) + aps_client.update_alert_policy( + request={"alert_policy": {"name": alert_policy_path_outside}} + ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @staticmethod def test_delete_alert_policy_inside(aps_client, alert_policy_path_inside): with pytest.raises(exceptions.NotFound): # no perms issue - aps_client.delete_alert_policy(alert_policy_path_inside) + aps_client.delete_alert_policy(request={"name": alert_policy_path_inside}) @staticmethod def test_delete_alert_policy_outside(aps_client, alert_policy_path_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - aps_client.delete_alert_policy(alert_policy_path_outside) + aps_client.delete_alert_policy(request={"name": alert_policy_path_outside}) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -142,71 +149,71 @@ class TestCRUDGroups(object): @staticmethod def test_create_group_inside(gs_client, name_inside): with pytest.raises(exceptions.InvalidArgument): # no perms issue - gs_client.create_group(name_inside, {}) + gs_client.create_group(request={"name": name_inside, "group": {}}) @staticmethod def test_create_group_outside(gs_client, name_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - gs_client.create_group(name_outside, {}) + gs_client.create_group(request={"name": name_outside, "group": {}}) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @staticmethod def test_list_groups_inside(gs_client, name_inside): - list(gs_client.list_groups(name_inside)) + list(gs_client.list_groups(request={"name": name_inside})) @staticmethod def test_list_groups_outside(gs_client, name_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - list(gs_client.list_groups(name_outside)) + list(gs_client.list_groups(request={"name": name_outside})) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @staticmethod def 
test_get_group_inside(gs_client, group_path_inside): with pytest.raises(exceptions.InvalidArgument): # no perms issue - gs_client.get_group(group_path_inside) + gs_client.get_group(request={"name": group_path_inside}) @staticmethod def test_get_group_outside(gs_client, group_path_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - gs_client.get_group(group_path_outside) + gs_client.get_group(request={"name": group_path_outside}) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @staticmethod def test_list_group_members_inside(gs_client, group_path_inside): with pytest.raises(exceptions.InvalidArgument): # no perms issue - list(gs_client.list_group_members(group_path_inside)) + list(gs_client.list_group_members(request={"name": group_path_inside})) @staticmethod def test_list_group_members_outside(gs_client, group_path_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - list(gs_client.list_group_members(group_path_outside)) + list(gs_client.list_group_members(request={"name": group_path_outside})) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @staticmethod def test_update_group_inside(gs_client, group_path_inside): with pytest.raises(exceptions.InvalidArgument): # no perms issue - gs_client.update_group({"name": group_path_inside}) + gs_client.update_group(request={"group": {"name": group_path_inside}}) @staticmethod def test_update_group_outside(gs_client, group_path_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - gs_client.update_group({"name": group_path_outside}) + gs_client.update_group(request={"group": {"name": group_path_outside}}) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @staticmethod def test_delete_group_inside(gs_client, group_path_inside): with pytest.raises(exceptions.InvalidArgument): # no perms issue - gs_client.delete_group(group_path_inside) + gs_client.delete_group(request={"name": group_path_inside}) @staticmethod def test_delete_group_outside(gs_client, 
group_path_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - gs_client.delete_group(group_path_outside) + gs_client.delete_group(request={"name": group_path_outside}) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -237,49 +244,61 @@ class TestCRUDMetricDescriptors(object): @staticmethod def test_create_metric_descriptor_inside(ms_client, name_inside): with pytest.raises(exceptions.InvalidArgument): # no perms issue - ms_client.create_metric_descriptor(name_inside, {}) + ms_client.create_metric_descriptor( + request={"name": name_inside, "metric_descriptor": {}} + ) @staticmethod def test_create_metric_descriptor_outside(ms_client, name_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - ms_client.create_metric_descriptor(name_outside, {}) + ms_client.create_metric_descriptor( + request={"name": name_outside, "metric_descriptor": {}} + ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @staticmethod def test_list_metric_descriptors_inside(ms_client, name_inside): - list(ms_client.list_metric_descriptors(name_inside)) + list(ms_client.list_metric_descriptors(request={"name": name_inside})) @staticmethod def test_list_metric_descriptors_outside(ms_client, name_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - list(ms_client.list_metric_descriptors(name_outside)) + list(ms_client.list_metric_descriptors(request={"name": name_outside})) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @staticmethod def test_get_metric_descriptor_inside(ms_client, metric_descriptor_path_inside): with pytest.raises(exceptions.NotFound): # no perms issue - ms_client.get_metric_descriptor(metric_descriptor_path_inside) + ms_client.get_metric_descriptor( + request={"name": metric_descriptor_path_inside} + ) @staticmethod def test_get_metric_descriptor_outside(ms_client, metric_descriptor_path_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - 
ms_client.get_metric_descriptor(metric_descriptor_path_outside) + ms_client.get_metric_descriptor( + request={"name": metric_descriptor_path_outside} + ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @staticmethod def test_delete_metric_descriptor_inside(ms_client, metric_descriptor_path_inside): with pytest.raises(exceptions.InvalidArgument): # no perms issue - ms_client.delete_metric_descriptor(metric_descriptor_path_inside) + ms_client.delete_metric_descriptor( + request={"name": metric_descriptor_path_inside} + ) @staticmethod def test_delete_metric_descriptor_outside( ms_client, metric_descriptor_path_outside ): with pytest.raises(exceptions.PermissionDenied) as exc: - ms_client.delete_metric_descriptor(metric_descriptor_path_outside) + ms_client.delete_metric_descriptor( + request={"name": metric_descriptor_path_outside} + ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -289,12 +308,16 @@ class TestCRUDTimeSeries(object): @staticmethod def test_create_time_series_inside(ms_client, name_inside): with pytest.raises(exceptions.InvalidArgument): # no perms issue - ms_client.create_time_series(name_inside, {}) + ms_client.create_time_series( + request={"name": name_inside, "time_series": {}} + ) @staticmethod def test_create_time_series_outside(ms_client, name_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - ms_client.create_time_series(name_outside, {}) + ms_client.create_time_series( + request={"name": name_outside, "time_series": {}} + ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -303,7 +326,12 @@ def test_list_time_series_inside(ms_client, name_inside): with pytest.raises(exceptions.InvalidArgument): # no perms issue list( ms_client.list_time_series( - name_inside, "", {}, enums.ListTimeSeriesRequest.TimeSeriesView.FULL + request={ + "name": name_inside, + "filter": "", + "interval": {}, + "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL, + } ) ) @@ -312,10 +340,12 @@ def 
test_list_time_series_outside(ms_client, name_outside): with pytest.raises(exceptions.PermissionDenied) as exc: list( ms_client.list_time_series( - name_outside, - "", - {}, - enums.ListTimeSeriesRequest.TimeSeriesView.FULL, + request={ + "name": name_outside, + "filter": "", + "interval": {}, + "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL, + } ) ) @@ -342,12 +372,18 @@ def monitored_resource_descriptor_path_outside(ms_client): class TestCRUDMonitoredResourceDescriptor(object): @staticmethod def test_list_monitored_resource_descriptors_inside(ms_client, name_inside): - list(ms_client.list_monitored_resource_descriptors(name_inside)) + list( + ms_client.list_monitored_resource_descriptors(request={"name": name_inside}) + ) @staticmethod def test_list_monitored_resource_descriptors_outside(ms_client, name_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - list(ms_client.list_monitored_resource_descriptors(name_outside)) + list( + ms_client.list_monitored_resource_descriptors( + request={"name": name_outside} + ) + ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -357,7 +393,7 @@ def test_get_monitored_resource_descriptor_inside( ): with pytest.raises(exceptions.NotFound): # no perms issue ms_client.get_monitored_resource_descriptor( - monitored_resource_descriptor_path_inside + request={"name": monitored_resource_descriptor_path_inside} ) @staticmethod @@ -366,7 +402,7 @@ def test_get_monitored_resource_descriptor_outside( ): with pytest.raises(exceptions.PermissionDenied) as exc: ms_client.get_monitored_resource_descriptor( - monitored_resource_descriptor_path_outside + request={"name": monitored_resource_descriptor_path_outside} ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -414,23 +450,27 @@ class TestCRUDNotificationChannels(object): @staticmethod def test_create_notification_channel_inside(ncs_client, name_inside): with pytest.raises(exceptions.InvalidArgument): # no perms issue - 
ncs_client.create_notification_channel(name_inside, {}) + ncs_client.create_notification_channel( + request={"name": name_inside, "notification_channel": {}} + ) @staticmethod def test_create_notification_channel_outside(ncs_client, name_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - ncs_client.create_notification_channel(name_outside, {}) + ncs_client.create_notification_channel( + request={"name": name_outside, "notification_channel": {}} + ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @staticmethod def test_list_notification_channels_inside(ncs_client, name_inside): - list(ncs_client.list_notification_channels(name_inside)) + list(ncs_client.list_notification_channels(request={"name": name_inside})) @staticmethod def test_list_notification_channels_outside(ncs_client, name_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - list(ncs_client.list_notification_channels(name_outside)) + list(ncs_client.list_notification_channels(request={"name": name_outside})) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -439,14 +479,18 @@ def test_get_notification_channel_inside( ncs_client, notification_channel_path_inside ): with pytest.raises(exceptions.NotFound): # no perms issue - ncs_client.get_notification_channel(notification_channel_path_inside) + ncs_client.get_notification_channel( + request={"name": notification_channel_path_inside} + ) @staticmethod def test_get_notification_channel_outside( ncs_client, notification_channel_path_outside ): with pytest.raises(exceptions.PermissionDenied) as exc: - ncs_client.get_notification_channel(notification_channel_path_outside) + ncs_client.get_notification_channel( + request={"name": notification_channel_path_outside} + ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -456,7 +500,7 @@ def test_get_notification_channel_verification_code_inside( ): with pytest.raises(exceptions.NotFound): # no perms issue ncs_client.get_notification_channel_verification_code( - 
notification_channel_path_inside + request={"name": notification_channel_path_inside} ) @staticmethod @@ -465,7 +509,7 @@ def test_get_notification_channel_verification_code_outside( ): with pytest.raises(exceptions.PermissionDenied) as exc: ncs_client.get_notification_channel_verification_code( - notification_channel_path_outside + request={"name": notification_channel_path_outside} ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -476,7 +520,7 @@ def test_send_notification_channel_verification_code_inside( ): with pytest.raises(exceptions.NotFound): # no perms issue ncs_client.send_notification_channel_verification_code( - notification_channel_path_inside + request={"name": notification_channel_path_inside} ) @staticmethod @@ -485,7 +529,7 @@ def test_send_notification_channel_verification_code_outside( ): with pytest.raises(exceptions.PermissionDenied) as exc: ncs_client.send_notification_channel_verification_code( - notification_channel_path_outside + request={"name": notification_channel_path_outside} ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -495,7 +539,9 @@ def test_verify_notification_channel_inside( ncs_client, notification_channel_path_inside ): with pytest.raises(exceptions.NotFound): # no perms issue - ncs_client.verify_notification_channel(notification_channel_path_inside, "") + ncs_client.verify_notification_channel( + request={"name": notification_channel_path_inside, "code": ""} + ) @staticmethod def test_verify_notification_channel_outside( @@ -503,7 +549,7 @@ def test_verify_notification_channel_outside( ): with pytest.raises(exceptions.PermissionDenied) as exc: ncs_client.verify_notification_channel( - notification_channel_path_outside, "" + request={"name": notification_channel_path_outside, "code": ""} ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -514,7 +560,9 @@ def test_update_notification_channel_inside( ): with pytest.raises(exceptions.InvalidArgument): # no perms issue 
ncs_client.update_notification_channel( - {"name": notification_channel_path_inside} + request={ + "notification_channel": {"name": notification_channel_path_inside} + } ) @staticmethod @@ -523,7 +571,9 @@ def test_update_notification_channel_outside( ): with pytest.raises(exceptions.PermissionDenied) as exc: ncs_client.update_notification_channel( - {"name": notification_channel_path_outside} + request={ + "notification_channel": {"name": notification_channel_path_outside} + } ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -533,25 +583,37 @@ def test_delete_notification_channel_inside( ncs_client, notification_channel_path_inside ): with pytest.raises(exceptions.NotFound): # no perms issue - ncs_client.delete_notification_channel(notification_channel_path_inside) + ncs_client.delete_notification_channel( + request={"name": notification_channel_path_inside} + ) @staticmethod def test_delete_notification_channel_outside( ncs_client, notification_channel_path_outside ): with pytest.raises(exceptions.PermissionDenied) as exc: - ncs_client.delete_notification_channel(notification_channel_path_outside) + ncs_client.delete_notification_channel( + request={"name": notification_channel_path_outside} + ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @staticmethod def test_list_notification_channel_descriptors_inside(ncs_client, name_inside): - list(ncs_client.list_notification_channel_descriptors(name_inside)) + list( + ncs_client.list_notification_channel_descriptors( + request={"name": name_inside} + ) + ) @staticmethod def test_list_notification_channel_descriptors_outside(ncs_client, name_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - list(ncs_client.list_notification_channel_descriptors(name_outside)) + list( + ncs_client.list_notification_channel_descriptors( + request={"name": name_outside} + ) + ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -561,7 +623,7 @@ def test_get_notification_channel_descriptor_inside( ): 
with pytest.raises(exceptions.NotFound): # no perms issue ncs_client.get_notification_channel_descriptor( - notification_channel_descriptor_path_inside + request={"name": notification_channel_descriptor_path_inside} ) @staticmethod @@ -570,7 +632,7 @@ def test_get_notification_channel_descriptor_outside( ): with pytest.raises(exceptions.PermissionDenied) as exc: ncs_client.get_notification_channel_descriptor( - notification_channel_descriptor_path_outside + request={"name": notification_channel_descriptor_path_outside} ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -602,23 +664,27 @@ class TestCRUDUptimeCheckConfigs(object): @staticmethod def test_create_uptime_check_config_inside(ucc_client, name_inside): with pytest.raises(exceptions.InvalidArgument): # no perms issue - ucc_client.create_uptime_check_config(name_inside, {}) + ucc_client.create_uptime_check_config( + request={"parent": name_inside, "uptime_check_config": {}} + ) @staticmethod def test_create_uptime_check_config_outside(ucc_client, name_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - ucc_client.create_uptime_check_config(name_outside, {}) + ucc_client.create_uptime_check_config( + request={"parent": name_outside, "uptime_check_config": {}} + ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @staticmethod def test_list_uptime_check_configs_inside(ucc_client, name_inside): - list(ucc_client.list_uptime_check_configs(name_inside)) + list(ucc_client.list_uptime_check_configs(request={"parent": name_inside})) @staticmethod def test_list_uptime_check_configs_outside(ucc_client, name_outside): with pytest.raises(exceptions.PermissionDenied) as exc: - list(ucc_client.list_uptime_check_configs(name_outside)) + list(ucc_client.list_uptime_check_configs(request={"parent": name_outside})) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -627,14 +693,18 @@ def test_get_uptime_check_config_inside( ucc_client, uptime_check_config_path_inside ): with 
pytest.raises(exceptions.NotFound): # no perms issue - ucc_client.get_uptime_check_config(uptime_check_config_path_inside) + ucc_client.get_uptime_check_config( + request={"name": uptime_check_config_path_inside} + ) @staticmethod def test_get_uptime_check_config_outside( ucc_client, uptime_check_config_path_outside ): with pytest.raises(exceptions.PermissionDenied) as exc: - ucc_client.get_uptime_check_config(uptime_check_config_path_outside) + ucc_client.get_uptime_check_config( + request={"name": uptime_check_config_path_outside} + ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -644,7 +714,9 @@ def test_update_uptime_check_config_inside( ): with pytest.raises(exceptions.NotFound): # no perms issue ucc_client.update_uptime_check_config( - {"name": uptime_check_config_path_inside} + request={ + "uptime_check_config": {"name": uptime_check_config_path_inside} + } ) @staticmethod @@ -653,7 +725,9 @@ def test_update_uptime_check_config_outside( ): with pytest.raises(exceptions.PermissionDenied) as exc: ucc_client.update_uptime_check_config( - {"name": uptime_check_config_path_outside} + request={ + "uptime_check_config": {"name": uptime_check_config_path_outside} + } ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message @@ -663,13 +737,17 @@ def test_delete_uptime_check_config_inside( ucc_client, uptime_check_config_path_inside ): with pytest.raises(exceptions.NotFound): # no perms issue - ucc_client.delete_uptime_check_config(uptime_check_config_path_inside) + ucc_client.delete_uptime_check_config( + request={"name": uptime_check_config_path_inside} + ) @staticmethod def test_delete_uptime_check_config_outside( ucc_client, uptime_check_config_path_outside ): with pytest.raises(exceptions.PermissionDenied) as exc: - ucc_client.delete_uptime_check_config(uptime_check_config_path_outside) + ucc_client.delete_uptime_check_config( + request={"name": uptime_check_config_path_outside} + ) assert _VPCSC_PROHIBITED_MESSAGE in exc.value.message diff --git 
a/tests/unit/gapic/monitoring_v3/__init__.py b/tests/unit/gapic/monitoring_v3/__init__.py new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/tests/unit/gapic/monitoring_v3/__init__.py @@ -0,0 +1 @@ + diff --git a/tests/unit/gapic/monitoring_v3/test_alert_policy_service.py b/tests/unit/gapic/monitoring_v3/test_alert_policy_service.py new file mode 100644 index 00000000..8d5d0df3 --- /dev/null +++ b/tests/unit/gapic/monitoring_v3/test_alert_policy_service.py @@ -0,0 +1,2189 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.monitoring_v3.services.alert_policy_service import ( + AlertPolicyServiceAsyncClient, +) +from google.cloud.monitoring_v3.services.alert_policy_service import ( + AlertPolicyServiceClient, +) +from google.cloud.monitoring_v3.services.alert_policy_service import pagers +from google.cloud.monitoring_v3.services.alert_policy_service import transports +from google.cloud.monitoring_v3.types import alert +from google.cloud.monitoring_v3.types import alert_service +from google.cloud.monitoring_v3.types import common +from google.cloud.monitoring_v3.types import mutation_record +from google.cloud.monitoring_v3.types import mutation_record as gm_mutation_record +from google.oauth2 import service_account +from google.protobuf import any_pb2 as gp_any # type: ignore +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.protobuf import wrappers_pb2 as wrappers # type: ignore +from google.rpc import status_pb2 as status # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert AlertPolicyServiceClient._get_default_mtls_endpoint(None) is None + assert ( + AlertPolicyServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + AlertPolicyServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + AlertPolicyServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + AlertPolicyServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + AlertPolicyServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [AlertPolicyServiceClient, AlertPolicyServiceAsyncClient] +) +def test_alert_policy_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "monitoring.googleapis.com:443" + + +def test_alert_policy_service_client_get_transport_class(): + transport = AlertPolicyServiceClient.get_transport_class() + assert transport == transports.AlertPolicyServiceGrpcTransport + + transport = 
AlertPolicyServiceClient.get_transport_class("grpc") + assert transport == transports.AlertPolicyServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (AlertPolicyServiceClient, transports.AlertPolicyServiceGrpcTransport, "grpc"), + ( + AlertPolicyServiceAsyncClient, + transports.AlertPolicyServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + AlertPolicyServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(AlertPolicyServiceClient), +) +@mock.patch.object( + AlertPolicyServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(AlertPolicyServiceAsyncClient), +) +def test_alert_policy_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(AlertPolicyServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(AlertPolicyServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + AlertPolicyServiceClient, + transports.AlertPolicyServiceGrpcTransport, + "grpc", + "true", + ), + ( + AlertPolicyServiceAsyncClient, + transports.AlertPolicyServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + AlertPolicyServiceClient, + transports.AlertPolicyServiceGrpcTransport, + "grpc", + "false", + ), + ( + AlertPolicyServiceAsyncClient, + transports.AlertPolicyServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + AlertPolicyServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(AlertPolicyServiceClient), +) +@mock.patch.object( + AlertPolicyServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(AlertPolicyServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_alert_policy_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. 
Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (AlertPolicyServiceClient, transports.AlertPolicyServiceGrpcTransport, "grpc"), + ( + AlertPolicyServiceAsyncClient, + transports.AlertPolicyServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_alert_policy_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (AlertPolicyServiceClient, transports.AlertPolicyServiceGrpcTransport, "grpc"), + ( + AlertPolicyServiceAsyncClient, + transports.AlertPolicyServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_alert_policy_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_alert_policy_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.monitoring_v3.services.alert_policy_service.transports.AlertPolicyServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = AlertPolicyServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def 
test_list_alert_policies( + transport: str = "grpc", request_type=alert_service.ListAlertPoliciesRequest +): + client = AlertPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_alert_policies), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = alert_service.ListAlertPoliciesResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_alert_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == alert_service.ListAlertPoliciesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAlertPoliciesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_alert_policies_from_dict(): + test_list_alert_policies(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_alert_policies_async(transport: str = "grpc_asyncio"): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = alert_service.ListAlertPoliciesRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_alert_policies), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + alert_service.ListAlertPoliciesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_alert_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAlertPoliciesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_alert_policies_field_headers(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = alert_service.ListAlertPoliciesRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_alert_policies), "__call__" + ) as call: + call.return_value = alert_service.ListAlertPoliciesResponse() + + client.list_alert_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_alert_policies_field_headers_async(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = alert_service.ListAlertPoliciesRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.list_alert_policies), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + alert_service.ListAlertPoliciesResponse() + ) + + await client.list_alert_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_list_alert_policies_flattened(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_alert_policies), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = alert_service.ListAlertPoliciesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_alert_policies(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_list_alert_policies_flattened_error(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_alert_policies( + alert_service.ListAlertPoliciesRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_list_alert_policies_flattened_async(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.list_alert_policies), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = alert_service.ListAlertPoliciesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + alert_service.ListAlertPoliciesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_alert_policies(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_list_alert_policies_flattened_error_async(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_alert_policies( + alert_service.ListAlertPoliciesRequest(), name="name_value", + ) + + +def test_list_alert_policies_pager(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_alert_policies), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + alert_service.ListAlertPoliciesResponse( + alert_policies=[ + alert.AlertPolicy(), + alert.AlertPolicy(), + alert.AlertPolicy(), + ], + next_page_token="abc", + ), + alert_service.ListAlertPoliciesResponse( + alert_policies=[], next_page_token="def", + ), + alert_service.ListAlertPoliciesResponse( + alert_policies=[alert.AlertPolicy(),], next_page_token="ghi", + ), + alert_service.ListAlertPoliciesResponse( + alert_policies=[alert.AlertPolicy(), alert.AlertPolicy(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", ""),)), + ) + pager = client.list_alert_policies(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, alert.AlertPolicy) for i in results) + + +def test_list_alert_policies_pages(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_alert_policies), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + alert_service.ListAlertPoliciesResponse( + alert_policies=[ + alert.AlertPolicy(), + alert.AlertPolicy(), + alert.AlertPolicy(), + ], + next_page_token="abc", + ), + alert_service.ListAlertPoliciesResponse( + alert_policies=[], next_page_token="def", + ), + alert_service.ListAlertPoliciesResponse( + alert_policies=[alert.AlertPolicy(),], next_page_token="ghi", + ), + alert_service.ListAlertPoliciesResponse( + alert_policies=[alert.AlertPolicy(), alert.AlertPolicy(),], + ), + RuntimeError, + ) + pages = list(client.list_alert_policies(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_alert_policies_async_pager(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_alert_policies), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + alert_service.ListAlertPoliciesResponse( + alert_policies=[ + alert.AlertPolicy(), + alert.AlertPolicy(), + alert.AlertPolicy(), + ], + next_page_token="abc", + ), + alert_service.ListAlertPoliciesResponse( + alert_policies=[], next_page_token="def", + ), + alert_service.ListAlertPoliciesResponse( + alert_policies=[alert.AlertPolicy(),], next_page_token="ghi", + ), + alert_service.ListAlertPoliciesResponse( + alert_policies=[alert.AlertPolicy(), alert.AlertPolicy(),], + ), + RuntimeError, + ) + async_pager = await client.list_alert_policies(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, alert.AlertPolicy) for i in responses) + + +@pytest.mark.asyncio +async def test_list_alert_policies_async_pages(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_alert_policies), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + alert_service.ListAlertPoliciesResponse( + alert_policies=[ + alert.AlertPolicy(), + alert.AlertPolicy(), + alert.AlertPolicy(), + ], + next_page_token="abc", + ), + alert_service.ListAlertPoliciesResponse( + alert_policies=[], next_page_token="def", + ), + alert_service.ListAlertPoliciesResponse( + alert_policies=[alert.AlertPolicy(),], next_page_token="ghi", + ), + alert_service.ListAlertPoliciesResponse( + alert_policies=[alert.AlertPolicy(), alert.AlertPolicy(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_alert_policies(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_alert_policy( + transport: str = "grpc", request_type=alert_service.GetAlertPolicyRequest +): + client = AlertPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_alert_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = alert.AlertPolicy( + name="name_value", + display_name="display_name_value", + combiner=alert.AlertPolicy.ConditionCombinerType.AND, + notification_channels=["notification_channels_value"], + ) + + response = client.get_alert_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == alert_service.GetAlertPolicyRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, alert.AlertPolicy) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.combiner == alert.AlertPolicy.ConditionCombinerType.AND + + assert response.notification_channels == ["notification_channels_value"] + + +def test_get_alert_policy_from_dict(): + test_get_alert_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_alert_policy_async(transport: str = "grpc_asyncio"): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = alert_service.GetAlertPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_alert_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + alert.AlertPolicy( + name="name_value", + display_name="display_name_value", + combiner=alert.AlertPolicy.ConditionCombinerType.AND, + notification_channels=["notification_channels_value"], + ) + ) + + response = await client.get_alert_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, alert.AlertPolicy) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.combiner == alert.AlertPolicy.ConditionCombinerType.AND + + assert response.notification_channels == ["notification_channels_value"] + + +def test_get_alert_policy_field_headers(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = alert_service.GetAlertPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_alert_policy), "__call__" + ) as call: + call.return_value = alert.AlertPolicy() + + client.get_alert_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_alert_policy_field_headers_async(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = alert_service.GetAlertPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_alert_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(alert.AlertPolicy()) + + await client.get_alert_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_alert_policy_flattened(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_alert_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = alert.AlertPolicy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_alert_policy(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_alert_policy_flattened_error(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_alert_policy( + alert_service.GetAlertPolicyRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_alert_policy_flattened_async(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_alert_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = alert.AlertPolicy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(alert.AlertPolicy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_alert_policy(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_alert_policy_flattened_error_async(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_alert_policy( + alert_service.GetAlertPolicyRequest(), name="name_value", + ) + + +def test_create_alert_policy( + transport: str = "grpc", request_type=alert_service.CreateAlertPolicyRequest +): + client = AlertPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_alert_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = alert.AlertPolicy( + name="name_value", + display_name="display_name_value", + combiner=alert.AlertPolicy.ConditionCombinerType.AND, + notification_channels=["notification_channels_value"], + ) + + response = client.create_alert_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == alert_service.CreateAlertPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, alert.AlertPolicy) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.combiner == alert.AlertPolicy.ConditionCombinerType.AND + + assert response.notification_channels == ["notification_channels_value"] + + +def test_create_alert_policy_from_dict(): + test_create_alert_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_alert_policy_async(transport: str = "grpc_asyncio"): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = alert_service.CreateAlertPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_alert_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + alert.AlertPolicy( + name="name_value", + display_name="display_name_value", + combiner=alert.AlertPolicy.ConditionCombinerType.AND, + notification_channels=["notification_channels_value"], + ) + ) + + response = await client.create_alert_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, alert.AlertPolicy) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.combiner == alert.AlertPolicy.ConditionCombinerType.AND + + assert response.notification_channels == ["notification_channels_value"] + + +def test_create_alert_policy_field_headers(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = alert_service.CreateAlertPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_alert_policy), "__call__" + ) as call: + call.return_value = alert.AlertPolicy() + + client.create_alert_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_alert_policy_field_headers_async(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = alert_service.CreateAlertPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_alert_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(alert.AlertPolicy()) + + await client.create_alert_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_create_alert_policy_flattened(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_alert_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = alert.AlertPolicy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_alert_policy( + name="name_value", alert_policy=alert.AlertPolicy(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].alert_policy == alert.AlertPolicy(name="name_value") + + +def test_create_alert_policy_flattened_error(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_alert_policy( + alert_service.CreateAlertPolicyRequest(), + name="name_value", + alert_policy=alert.AlertPolicy(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_alert_policy_flattened_async(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_alert_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = alert.AlertPolicy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(alert.AlertPolicy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_alert_policy( + name="name_value", alert_policy=alert.AlertPolicy(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].alert_policy == alert.AlertPolicy(name="name_value") + + +@pytest.mark.asyncio +async def test_create_alert_policy_flattened_error_async(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_alert_policy( + alert_service.CreateAlertPolicyRequest(), + name="name_value", + alert_policy=alert.AlertPolicy(name="name_value"), + ) + + +def test_delete_alert_policy( + transport: str = "grpc", request_type=alert_service.DeleteAlertPolicyRequest +): + client = AlertPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_alert_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_alert_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == alert_service.DeleteAlertPolicyRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_alert_policy_from_dict(): + test_delete_alert_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_alert_policy_async(transport: str = "grpc_asyncio"): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = alert_service.DeleteAlertPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_alert_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_alert_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_alert_policy_field_headers(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = alert_service.DeleteAlertPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_alert_policy), "__call__" + ) as call: + call.return_value = None + + client.delete_alert_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_alert_policy_field_headers_async(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = alert_service.DeleteAlertPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_alert_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_alert_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_alert_policy_flattened(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_alert_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_alert_policy(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_alert_policy_flattened_error(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_alert_policy( + alert_service.DeleteAlertPolicyRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_alert_policy_flattened_async(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_alert_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_alert_policy(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_alert_policy_flattened_error_async(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_alert_policy( + alert_service.DeleteAlertPolicyRequest(), name="name_value", + ) + + +def test_update_alert_policy( + transport: str = "grpc", request_type=alert_service.UpdateAlertPolicyRequest +): + client = AlertPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_alert_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = alert.AlertPolicy( + name="name_value", + display_name="display_name_value", + combiner=alert.AlertPolicy.ConditionCombinerType.AND, + notification_channels=["notification_channels_value"], + ) + + response = client.update_alert_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == alert_service.UpdateAlertPolicyRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, alert.AlertPolicy) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.combiner == alert.AlertPolicy.ConditionCombinerType.AND + + assert response.notification_channels == ["notification_channels_value"] + + +def test_update_alert_policy_from_dict(): + test_update_alert_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_alert_policy_async(transport: str = "grpc_asyncio"): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = alert_service.UpdateAlertPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_alert_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + alert.AlertPolicy( + name="name_value", + display_name="display_name_value", + combiner=alert.AlertPolicy.ConditionCombinerType.AND, + notification_channels=["notification_channels_value"], + ) + ) + + response = await client.update_alert_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, alert.AlertPolicy) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.combiner == alert.AlertPolicy.ConditionCombinerType.AND + + assert response.notification_channels == ["notification_channels_value"] + + +def test_update_alert_policy_field_headers(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = alert_service.UpdateAlertPolicyRequest() + request.alert_policy.name = "alert_policy.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_alert_policy), "__call__" + ) as call: + call.return_value = alert.AlertPolicy() + + client.update_alert_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "alert_policy.name=alert_policy.name/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_alert_policy_field_headers_async(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = alert_service.UpdateAlertPolicyRequest() + request.alert_policy.name = "alert_policy.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.update_alert_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(alert.AlertPolicy()) + + await client.update_alert_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "alert_policy.name=alert_policy.name/value", + ) in kw["metadata"] + + +def test_update_alert_policy_flattened(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_alert_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = alert.AlertPolicy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_alert_policy( + update_mask=field_mask.FieldMask(paths=["paths_value"]), + alert_policy=alert.AlertPolicy(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + assert args[0].alert_policy == alert.AlertPolicy(name="name_value") + + +def test_update_alert_policy_flattened_error(): + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_alert_policy( + alert_service.UpdateAlertPolicyRequest(), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + alert_policy=alert.AlertPolicy(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_update_alert_policy_flattened_async(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_alert_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = alert.AlertPolicy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(alert.AlertPolicy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_alert_policy( + update_mask=field_mask.FieldMask(paths=["paths_value"]), + alert_policy=alert.AlertPolicy(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + assert args[0].alert_policy == alert.AlertPolicy(name="name_value") + + +@pytest.mark.asyncio +async def test_update_alert_policy_flattened_error_async(): + client = AlertPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_alert_policy( + alert_service.UpdateAlertPolicyRequest(), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + alert_policy=alert.AlertPolicy(name="name_value"), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.AlertPolicyServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AlertPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.AlertPolicyServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AlertPolicyServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.AlertPolicyServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AlertPolicyServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.AlertPolicyServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = AlertPolicyServiceClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.AlertPolicyServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.AlertPolicyServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.AlertPolicyServiceGrpcTransport, + transports.AlertPolicyServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = AlertPolicyServiceClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.AlertPolicyServiceGrpcTransport,) + + +def test_alert_policy_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.AlertPolicyServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_alert_policy_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.monitoring_v3.services.alert_policy_service.transports.AlertPolicyServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.AlertPolicyServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "list_alert_policies", + "get_alert_policy", + "create_alert_policy", + "delete_alert_policy", + "update_alert_policy", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_alert_policy_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.monitoring_v3.services.alert_policy_service.transports.AlertPolicyServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.AlertPolicyServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + quota_project_id="octopus", + ) + + +def test_alert_policy_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.monitoring_v3.services.alert_policy_service.transports.AlertPolicyServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.AlertPolicyServiceTransport() + adc.assert_called_once() + + +def test_alert_policy_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + AlertPolicyServiceClient() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + quota_project_id=None, + ) + + +def test_alert_policy_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.AlertPolicyServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + quota_project_id="octopus", + ) + + +def test_alert_policy_service_host_no_port(): + client = AlertPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="monitoring.googleapis.com" + ), + ) + assert client._transport._host == "monitoring.googleapis.com:443" + + +def test_alert_policy_service_host_with_port(): + client = AlertPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="monitoring.googleapis.com:8000" + ), + ) + assert client._transport._host == "monitoring.googleapis.com:8000" + + +def test_alert_policy_service_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that channel is used if provided. 
+ transport = transports.AlertPolicyServiceGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +def test_alert_policy_service_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.AlertPolicyServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.AlertPolicyServiceGrpcTransport, + transports.AlertPolicyServiceGrpcAsyncIOTransport, + ], +) +def test_alert_policy_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + 
ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.AlertPolicyServiceGrpcTransport, + transports.AlertPolicyServiceGrpcAsyncIOTransport, + ], +) +def test_alert_policy_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_alert_policy_path(): + project = "squid" + alert_policy = "clam" + + expected = "projects/{project}/alertPolicies/{alert_policy}".format( + project=project, alert_policy=alert_policy, + ) + actual = AlertPolicyServiceClient.alert_policy_path(project, alert_policy) + assert expected == actual + + +def test_parse_alert_policy_path(): + expected = { + "project": "whelk", + "alert_policy": "octopus", + } + path = AlertPolicyServiceClient.alert_policy_path(**expected) + + # Check that the path construction is reversible. 
+ actual = AlertPolicyServiceClient.parse_alert_policy_path(path) + assert expected == actual + + +def test_alert_policy_condition_path(): + project = "oyster" + alert_policy = "nudibranch" + condition = "cuttlefish" + + expected = "projects/{project}/alertPolicies/{alert_policy}/conditions/{condition}".format( + project=project, alert_policy=alert_policy, condition=condition, + ) + actual = AlertPolicyServiceClient.alert_policy_condition_path( + project, alert_policy, condition + ) + assert expected == actual + + +def test_parse_alert_policy_condition_path(): + expected = { + "project": "mussel", + "alert_policy": "winkle", + "condition": "nautilus", + } + path = AlertPolicyServiceClient.alert_policy_condition_path(**expected) + + # Check that the path construction is reversible. + actual = AlertPolicyServiceClient.parse_alert_policy_condition_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "scallop" + + expected = "projects/{project}".format(project=project,) + actual = AlertPolicyServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = AlertPolicyServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = AlertPolicyServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "squid" + + expected = "organizations/{organization}".format(organization=organization,) + actual = AlertPolicyServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = AlertPolicyServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = AlertPolicyServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "whelk" + + expected = "folders/{folder}".format(folder=folder,) + actual = AlertPolicyServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = AlertPolicyServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = AlertPolicyServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "oyster" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = AlertPolicyServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = AlertPolicyServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = AlertPolicyServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "cuttlefish" + location = "mussel" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = AlertPolicyServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "winkle", + "location": "nautilus", + } + path = AlertPolicyServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = AlertPolicyServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.AlertPolicyServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = AlertPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.AlertPolicyServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = AlertPolicyServiceClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/monitoring_v3/test_group_service.py b/tests/unit/gapic/monitoring_v3/test_group_service.py new file mode 100644 index 00000000..df85d219 --- /dev/null +++ b/tests/unit/gapic/monitoring_v3/test_group_service.py @@ -0,0 +1,2380 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.monitoring_v3.services.group_service import GroupServiceAsyncClient +from google.cloud.monitoring_v3.services.group_service import GroupServiceClient +from google.cloud.monitoring_v3.services.group_service import pagers +from google.cloud.monitoring_v3.services.group_service import transports +from google.cloud.monitoring_v3.types import common +from google.cloud.monitoring_v3.types import group +from google.cloud.monitoring_v3.types import group as gm_group +from google.cloud.monitoring_v3.types import group_service +from google.oauth2 import service_account +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert GroupServiceClient._get_default_mtls_endpoint(None) is None + assert ( + GroupServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + ) + assert ( + GroupServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + GroupServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + GroupServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert GroupServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [GroupServiceClient, GroupServiceAsyncClient]) +def test_group_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "monitoring.googleapis.com:443" + + +def test_group_service_client_get_transport_class(): + transport = GroupServiceClient.get_transport_class() + assert transport == transports.GroupServiceGrpcTransport + + transport = GroupServiceClient.get_transport_class("grpc") + assert transport == transports.GroupServiceGrpcTransport + + 
+@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (GroupServiceClient, transports.GroupServiceGrpcTransport, "grpc"), + ( + GroupServiceAsyncClient, + transports.GroupServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + GroupServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(GroupServiceClient) +) +@mock.patch.object( + GroupServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(GroupServiceAsyncClient), +) +def test_group_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(GroupServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(GroupServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (GroupServiceClient, transports.GroupServiceGrpcTransport, "grpc", "true"), + ( + GroupServiceAsyncClient, + transports.GroupServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (GroupServiceClient, transports.GroupServiceGrpcTransport, "grpc", "false"), + ( + GroupServiceAsyncClient, + transports.GroupServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + GroupServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(GroupServiceClient) +) +@mock.patch.object( + GroupServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(GroupServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_group_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (GroupServiceClient, transports.GroupServiceGrpcTransport, "grpc"), + ( + GroupServiceAsyncClient, + transports.GroupServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_group_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (GroupServiceClient, transports.GroupServiceGrpcTransport, "grpc"), + ( + GroupServiceAsyncClient, + transports.GroupServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_group_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_group_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.monitoring_v3.services.group_service.transports.GroupServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = GroupServiceClient(client_options={"api_endpoint": "squid.clam.whelk"}) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_list_groups( + transport: str = "grpc", 
request_type=group_service.ListGroupsRequest +): + client = GroupServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_groups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = group_service.ListGroupsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_groups(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == group_service.ListGroupsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListGroupsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_groups_from_dict(): + test_list_groups(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_groups_async(transport: str = "grpc_asyncio"): + client = GroupServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = group_service.ListGroupsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_groups), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + group_service.ListGroupsResponse(next_page_token="next_page_token_value",) + ) + + response = await client.list_groups(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListGroupsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_groups_field_headers(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = group_service.ListGroupsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_groups), "__call__") as call: + call.return_value = group_service.ListGroupsResponse() + + client.list_groups(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_groups_field_headers_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = group_service.ListGroupsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.list_groups), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + group_service.ListGroupsResponse() + ) + + await client.list_groups(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_list_groups_flattened(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_groups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = group_service.ListGroupsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_groups(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_list_groups_flattened_error(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_groups( + group_service.ListGroupsRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_list_groups_flattened_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.list_groups), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = group_service.ListGroupsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + group_service.ListGroupsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_groups(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_list_groups_flattened_error_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_groups( + group_service.ListGroupsRequest(), name="name_value", + ) + + +def test_list_groups_pager(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_groups), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + group_service.ListGroupsResponse( + group=[group.Group(), group.Group(), group.Group(),], + next_page_token="abc", + ), + group_service.ListGroupsResponse(group=[], next_page_token="def",), + group_service.ListGroupsResponse( + group=[group.Group(),], next_page_token="ghi", + ), + group_service.ListGroupsResponse(group=[group.Group(), group.Group(),],), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", ""),)), + ) + pager = client.list_groups(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, group.Group) for i in results) + + +def test_list_groups_pages(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_groups), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + group_service.ListGroupsResponse( + group=[group.Group(), group.Group(), group.Group(),], + next_page_token="abc", + ), + group_service.ListGroupsResponse(group=[], next_page_token="def",), + group_service.ListGroupsResponse( + group=[group.Group(),], next_page_token="ghi", + ), + group_service.ListGroupsResponse(group=[group.Group(), group.Group(),],), + RuntimeError, + ) + pages = list(client.list_groups(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_groups_async_pager(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_groups), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + group_service.ListGroupsResponse( + group=[group.Group(), group.Group(), group.Group(),], + next_page_token="abc", + ), + group_service.ListGroupsResponse(group=[], next_page_token="def",), + group_service.ListGroupsResponse( + group=[group.Group(),], next_page_token="ghi", + ), + group_service.ListGroupsResponse(group=[group.Group(), group.Group(),],), + RuntimeError, + ) + async_pager = await client.list_groups(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, group.Group) for i in responses) + + +@pytest.mark.asyncio +async def test_list_groups_async_pages(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_groups), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + group_service.ListGroupsResponse( + group=[group.Group(), group.Group(), group.Group(),], + next_page_token="abc", + ), + group_service.ListGroupsResponse(group=[], next_page_token="def",), + group_service.ListGroupsResponse( + group=[group.Group(),], next_page_token="ghi", + ), + group_service.ListGroupsResponse(group=[group.Group(), group.Group(),],), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_groups(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_group(transport: str = "grpc", request_type=group_service.GetGroupRequest): + client = GroupServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_group), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = group.Group( + name="name_value", + display_name="display_name_value", + parent_name="parent_name_value", + filter="filter_value", + is_cluster=True, + ) + + response = client.get_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == group_service.GetGroupRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, group.Group) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.parent_name == "parent_name_value" + + assert response.filter == "filter_value" + + assert response.is_cluster is True + + +def test_get_group_from_dict(): + test_get_group(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_group_async(transport: str = "grpc_asyncio"): + client = GroupServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = group_service.GetGroupRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_group), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + group.Group( + name="name_value", + display_name="display_name_value", + parent_name="parent_name_value", + filter="filter_value", + is_cluster=True, + ) + ) + + response = await client.get_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, group.Group) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.parent_name == "parent_name_value" + + assert response.filter == "filter_value" + + assert response.is_cluster is True + + +def test_get_group_field_headers(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = group_service.GetGroupRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_group), "__call__") as call: + call.return_value = group.Group() + + client.get_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_group_field_headers_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = group_service.GetGroupRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_group), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(group.Group()) + + await client.get_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_group_flattened(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_group), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = group.Group() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_group(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_group_flattened_error(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_group( + group_service.GetGroupRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_group_flattened_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_group), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = group.Group() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(group.Group()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_group(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_group_flattened_error_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_group( + group_service.GetGroupRequest(), name="name_value", + ) + + +def test_create_group( + transport: str = "grpc", request_type=group_service.CreateGroupRequest +): + client = GroupServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_group), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gm_group.Group( + name="name_value", + display_name="display_name_value", + parent_name="parent_name_value", + filter="filter_value", + is_cluster=True, + ) + + response = client.create_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == group_service.CreateGroupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gm_group.Group) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.parent_name == "parent_name_value" + + assert response.filter == "filter_value" + + assert response.is_cluster is True + + +def test_create_group_from_dict(): + test_create_group(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_group_async(transport: str = "grpc_asyncio"): + client = GroupServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = group_service.CreateGroupRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_group), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gm_group.Group( + name="name_value", + display_name="display_name_value", + parent_name="parent_name_value", + filter="filter_value", + is_cluster=True, + ) + ) + + response = await client.create_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, gm_group.Group) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.parent_name == "parent_name_value" + + assert response.filter == "filter_value" + + assert response.is_cluster is True + + +def test_create_group_field_headers(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = group_service.CreateGroupRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_group), "__call__") as call: + call.return_value = gm_group.Group() + + client.create_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_group_field_headers_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = group_service.CreateGroupRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_group), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gm_group.Group()) + + await client.create_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_create_group_flattened(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_group), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gm_group.Group() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_group( + name="name_value", group=gm_group.Group(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].group == gm_group.Group(name="name_value") + + +def test_create_group_flattened_error(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_group( + group_service.CreateGroupRequest(), + name="name_value", + group=gm_group.Group(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_group_flattened_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_group), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gm_group.Group() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gm_group.Group()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_group( + name="name_value", group=gm_group.Group(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].group == gm_group.Group(name="name_value") + + +@pytest.mark.asyncio +async def test_create_group_flattened_error_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_group( + group_service.CreateGroupRequest(), + name="name_value", + group=gm_group.Group(name="name_value"), + ) + + +def test_update_group( + transport: str = "grpc", request_type=group_service.UpdateGroupRequest +): + client = GroupServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_group), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gm_group.Group( + name="name_value", + display_name="display_name_value", + parent_name="parent_name_value", + filter="filter_value", + is_cluster=True, + ) + + response = client.update_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == group_service.UpdateGroupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gm_group.Group) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.parent_name == "parent_name_value" + + assert response.filter == "filter_value" + + assert response.is_cluster is True + + +def test_update_group_from_dict(): + test_update_group(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_group_async(transport: str = "grpc_asyncio"): + client = GroupServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = group_service.UpdateGroupRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_group), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gm_group.Group( + name="name_value", + display_name="display_name_value", + parent_name="parent_name_value", + filter="filter_value", + is_cluster=True, + ) + ) + + response = await client.update_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, gm_group.Group) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.parent_name == "parent_name_value" + + assert response.filter == "filter_value" + + assert response.is_cluster is True + + +def test_update_group_field_headers(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = group_service.UpdateGroupRequest() + request.group.name = "group.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_group), "__call__") as call: + call.return_value = gm_group.Group() + + client.update_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "group.name=group.name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_group_field_headers_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = group_service.UpdateGroupRequest() + request.group.name = "group.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_group), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gm_group.Group()) + + await client.update_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "group.name=group.name/value",) in kw["metadata"] + + +def test_update_group_flattened(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_group), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gm_group.Group() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_group(group=gm_group.Group(name="name_value"),) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].group == gm_group.Group(name="name_value") + + +def test_update_group_flattened_error(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_group( + group_service.UpdateGroupRequest(), group=gm_group.Group(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_update_group_flattened_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_group), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gm_group.Group() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gm_group.Group()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_group(group=gm_group.Group(name="name_value"),) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].group == gm_group.Group(name="name_value") + + +@pytest.mark.asyncio +async def test_update_group_flattened_error_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_group( + group_service.UpdateGroupRequest(), group=gm_group.Group(name="name_value"), + ) + + +def test_delete_group( + transport: str = "grpc", request_type=group_service.DeleteGroupRequest +): + client = GroupServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_group), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == group_service.DeleteGroupRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_group_from_dict(): + test_delete_group(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_group_async(transport: str = "grpc_asyncio"): + client = GroupServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = group_service.DeleteGroupRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_group), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_group(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_group_field_headers(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = group_service.DeleteGroupRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_group), "__call__") as call: + call.return_value = None + + client.delete_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_group_field_headers_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = group_service.DeleteGroupRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_group), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_group_flattened(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_group), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_group(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_group_flattened_error(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_group( + group_service.DeleteGroupRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_group_flattened_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_group), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_group(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_group_flattened_error_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_group( + group_service.DeleteGroupRequest(), name="name_value", + ) + + +def test_list_group_members( + transport: str = "grpc", request_type=group_service.ListGroupMembersRequest +): + client = GroupServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_group_members), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = group_service.ListGroupMembersResponse( + next_page_token="next_page_token_value", total_size=1086, + ) + + response = client.list_group_members(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == group_service.ListGroupMembersRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListGroupMembersPager) + + assert response.next_page_token == "next_page_token_value" + + assert response.total_size == 1086 + + +def test_list_group_members_from_dict(): + test_list_group_members(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_group_members_async(transport: str = "grpc_asyncio"): + client = GroupServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = group_service.ListGroupMembersRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_group_members), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + group_service.ListGroupMembersResponse( + next_page_token="next_page_token_value", total_size=1086, + ) + ) + + response = await client.list_group_members(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListGroupMembersAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + assert response.total_size == 1086 + + +def test_list_group_members_field_headers(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = group_service.ListGroupMembersRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.list_group_members), "__call__" + ) as call: + call.return_value = group_service.ListGroupMembersResponse() + + client.list_group_members(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_group_members_field_headers_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = group_service.ListGroupMembersRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_group_members), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + group_service.ListGroupMembersResponse() + ) + + await client.list_group_members(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_list_group_members_flattened(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_group_members), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = group_service.ListGroupMembersResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_group_members(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_list_group_members_flattened_error(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_group_members( + group_service.ListGroupMembersRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_list_group_members_flattened_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_group_members), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = group_service.ListGroupMembersResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + group_service.ListGroupMembersResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_group_members(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_list_group_members_flattened_error_async(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_group_members( + group_service.ListGroupMembersRequest(), name="name_value", + ) + + +def test_list_group_members_pager(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_group_members), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + group_service.ListGroupMembersResponse( + members=[ + monitored_resource.MonitoredResource(), + monitored_resource.MonitoredResource(), + monitored_resource.MonitoredResource(), + ], + next_page_token="abc", + ), + group_service.ListGroupMembersResponse(members=[], next_page_token="def",), + group_service.ListGroupMembersResponse( + members=[monitored_resource.MonitoredResource(),], + next_page_token="ghi", + ), + group_service.ListGroupMembersResponse( + members=[ + monitored_resource.MonitoredResource(), + monitored_resource.MonitoredResource(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", ""),)), + ) + pager = client.list_group_members(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, monitored_resource.MonitoredResource) for i in results) + + +def test_list_group_members_pages(): + client = GroupServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.list_group_members), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + group_service.ListGroupMembersResponse( + members=[ + monitored_resource.MonitoredResource(), + monitored_resource.MonitoredResource(), + monitored_resource.MonitoredResource(), + ], + next_page_token="abc", + ), + group_service.ListGroupMembersResponse(members=[], next_page_token="def",), + group_service.ListGroupMembersResponse( + members=[monitored_resource.MonitoredResource(),], + next_page_token="ghi", + ), + group_service.ListGroupMembersResponse( + members=[ + monitored_resource.MonitoredResource(), + monitored_resource.MonitoredResource(), + ], + ), + RuntimeError, + ) + pages = list(client.list_group_members(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_group_members_async_pager(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_group_members), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + group_service.ListGroupMembersResponse( + members=[ + monitored_resource.MonitoredResource(), + monitored_resource.MonitoredResource(), + monitored_resource.MonitoredResource(), + ], + next_page_token="abc", + ), + group_service.ListGroupMembersResponse(members=[], next_page_token="def",), + group_service.ListGroupMembersResponse( + members=[monitored_resource.MonitoredResource(),], + next_page_token="ghi", + ), + group_service.ListGroupMembersResponse( + members=[ + monitored_resource.MonitoredResource(), + monitored_resource.MonitoredResource(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_group_members(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, monitored_resource.MonitoredResource) for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_group_members_async_pages(): + client = GroupServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_group_members), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + group_service.ListGroupMembersResponse( + members=[ + monitored_resource.MonitoredResource(), + monitored_resource.MonitoredResource(), + monitored_resource.MonitoredResource(), + ], + next_page_token="abc", + ), + group_service.ListGroupMembersResponse(members=[], next_page_token="def",), + group_service.ListGroupMembersResponse( + members=[monitored_resource.MonitoredResource(),], + next_page_token="ghi", + ), + group_service.ListGroupMembersResponse( + members=[ + monitored_resource.MonitoredResource(), + monitored_resource.MonitoredResource(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_group_members(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.GroupServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GroupServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.GroupServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GroupServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.GroupServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GroupServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.GroupServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = GroupServiceClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.GroupServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.GroupServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [transports.GroupServiceGrpcTransport, transports.GroupServiceGrpcAsyncIOTransport], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = GroupServiceClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.GroupServiceGrpcTransport,) + + +def test_group_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.GroupServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_group_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch( + "google.cloud.monitoring_v3.services.group_service.transports.GroupServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.GroupServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "list_groups", + "get_group", + "create_group", + "update_group", + "delete_group", + "list_group_members", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_group_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.monitoring_v3.services.group_service.transports.GroupServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.GroupServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + quota_project_id="octopus", + ) + + +def test_group_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.monitoring_v3.services.group_service.transports.GroupServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.GroupServiceTransport() + adc.assert_called_once() + + +def test_group_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + GroupServiceClient() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + quota_project_id=None, + ) + + +def test_group_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.GroupServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + quota_project_id="octopus", + ) + + +def test_group_service_host_no_port(): + client = GroupServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="monitoring.googleapis.com" + ), + ) + assert client._transport._host == "monitoring.googleapis.com:443" + + +def test_group_service_host_with_port(): + client = GroupServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="monitoring.googleapis.com:8000" + ), + ) + assert client._transport._host == 
"monitoring.googleapis.com:8000" + + +def test_group_service_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.GroupServiceGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +def test_group_service_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.GroupServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +@pytest.mark.parametrize( + "transport_class", + [transports.GroupServiceGrpcTransport, transports.GroupServiceGrpcAsyncIOTransport], +) +def test_group_service_transport_channel_mtls_with_client_cert_source(transport_class): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + 
"https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "transport_class", + [transports.GroupServiceGrpcTransport, transports.GroupServiceGrpcAsyncIOTransport], +) +def test_group_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_group_path(): + project = "squid" + group = "clam" + + expected = "projects/{project}/groups/{group}".format(project=project, group=group,) + actual = GroupServiceClient.group_path(project, group) + assert expected == actual + + +def test_parse_group_path(): + expected = { + "project": "whelk", + "group": "octopus", + } + path = GroupServiceClient.group_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GroupServiceClient.parse_group_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "oyster" + + expected = "projects/{project}".format(project=project,) + actual = GroupServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nudibranch", + } + path = GroupServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = GroupServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "cuttlefish" + + expected = "organizations/{organization}".format(organization=organization,) + actual = GroupServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "mussel", + } + path = GroupServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = GroupServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "winkle" + + expected = "folders/{folder}".format(folder=folder,) + actual = GroupServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = GroupServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GroupServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "scallop" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = GroupServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "abalone", + } + path = GroupServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = GroupServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "squid" + location = "clam" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = GroupServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = GroupServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GroupServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.GroupServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = GroupServiceClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.GroupServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = GroupServiceClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/monitoring_v3/test_metric_service.py b/tests/unit/gapic/monitoring_v3/test_metric_service.py new file mode 100644 index 00000000..7980a123 --- /dev/null +++ b/tests/unit/gapic/monitoring_v3/test_metric_service.py @@ -0,0 +1,3139 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api import distribution_pb2 as distribution # type: ignore +from google.api import label_pb2 as label # type: ignore +from google.api import launch_stage_pb2 as launch_stage # type: ignore +from google.api import metric_pb2 as ga_metric # type: ignore +from google.api import metric_pb2 as metric # type: ignore +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.monitoring_v3.services.metric_service import MetricServiceAsyncClient +from google.cloud.monitoring_v3.services.metric_service import MetricServiceClient +from google.cloud.monitoring_v3.services.metric_service import pagers +from google.cloud.monitoring_v3.services.metric_service import transports +from google.cloud.monitoring_v3.types import common +from google.cloud.monitoring_v3.types import metric as gm_metric +from google.cloud.monitoring_v3.types import metric_service +from google.oauth2 import service_account +from google.protobuf import any_pb2 as gp_any # type: ignore +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import struct_pb2 as struct # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. 
+# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert MetricServiceClient._get_default_mtls_endpoint(None) is None + assert ( + MetricServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + MetricServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + MetricServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + MetricServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + MetricServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [MetricServiceClient, MetricServiceAsyncClient] +) +def test_metric_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "monitoring.googleapis.com:443" + + +def test_metric_service_client_get_transport_class(): + transport = MetricServiceClient.get_transport_class() + assert transport == 
transports.MetricServiceGrpcTransport + + transport = MetricServiceClient.get_transport_class("grpc") + assert transport == transports.MetricServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (MetricServiceClient, transports.MetricServiceGrpcTransport, "grpc"), + ( + MetricServiceAsyncClient, + transports.MetricServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + MetricServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(MetricServiceClient), +) +@mock.patch.object( + MetricServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(MetricServiceAsyncClient), +) +def test_metric_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(MetricServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(MetricServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (MetricServiceClient, transports.MetricServiceGrpcTransport, "grpc", "true"), + ( + MetricServiceAsyncClient, + transports.MetricServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (MetricServiceClient, transports.MetricServiceGrpcTransport, "grpc", "false"), + ( + MetricServiceAsyncClient, + transports.MetricServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + MetricServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(MetricServiceClient), +) +@mock.patch.object( + MetricServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(MetricServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_metric_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (MetricServiceClient, transports.MetricServiceGrpcTransport, "grpc"), + ( + MetricServiceAsyncClient, + transports.MetricServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_metric_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (MetricServiceClient, transports.MetricServiceGrpcTransport, "grpc"), + ( + MetricServiceAsyncClient, + transports.MetricServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_metric_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_metric_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.monitoring_v3.services.metric_service.transports.MetricServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = MetricServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_list_monitored_resource_descriptors( + transport: 
str = "grpc", + request_type=metric_service.ListMonitoredResourceDescriptorsRequest, +): + client = MetricServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_monitored_resource_descriptors), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metric_service.ListMonitoredResourceDescriptorsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_monitored_resource_descriptors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metric_service.ListMonitoredResourceDescriptorsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMonitoredResourceDescriptorsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_monitored_resource_descriptors_from_dict(): + test_list_monitored_resource_descriptors(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_monitored_resource_descriptors_async( + transport: str = "grpc_asyncio", +): + client = MetricServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = metric_service.ListMonitoredResourceDescriptorsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.list_monitored_resource_descriptors), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metric_service.ListMonitoredResourceDescriptorsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_monitored_resource_descriptors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMonitoredResourceDescriptorsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_monitored_resource_descriptors_field_headers(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metric_service.ListMonitoredResourceDescriptorsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_monitored_resource_descriptors), "__call__" + ) as call: + call.return_value = metric_service.ListMonitoredResourceDescriptorsResponse() + + client.list_monitored_resource_descriptors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_monitored_resource_descriptors_field_headers_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metric_service.ListMonitoredResourceDescriptorsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_monitored_resource_descriptors), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metric_service.ListMonitoredResourceDescriptorsResponse() + ) + + await client.list_monitored_resource_descriptors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_list_monitored_resource_descriptors_flattened(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_monitored_resource_descriptors), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metric_service.ListMonitoredResourceDescriptorsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_monitored_resource_descriptors(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_list_monitored_resource_descriptors_flattened_error(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_monitored_resource_descriptors( + metric_service.ListMonitoredResourceDescriptorsRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_list_monitored_resource_descriptors_flattened_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_monitored_resource_descriptors), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metric_service.ListMonitoredResourceDescriptorsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metric_service.ListMonitoredResourceDescriptorsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_monitored_resource_descriptors(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_list_monitored_resource_descriptors_flattened_error_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_monitored_resource_descriptors( + metric_service.ListMonitoredResourceDescriptorsRequest(), name="name_value", + ) + + +def test_list_monitored_resource_descriptors_pager(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_monitored_resource_descriptors), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + metric_service.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + ], + next_page_token="abc", + ), + metric_service.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[], next_page_token="def", + ), + metric_service.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + ], + next_page_token="ghi", + ), + metric_service.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", ""),)), + ) + pager = client.list_monitored_resource_descriptors(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all( + isinstance(i, monitored_resource.MonitoredResourceDescriptor) + for i in results + ) + + +def test_list_monitored_resource_descriptors_pages(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.list_monitored_resource_descriptors), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + metric_service.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + ], + next_page_token="abc", + ), + metric_service.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[], next_page_token="def", + ), + metric_service.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + ], + next_page_token="ghi", + ), + metric_service.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + ], + ), + RuntimeError, + ) + pages = list(client.list_monitored_resource_descriptors(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_monitored_resource_descriptors_async_pager(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_monitored_resource_descriptors), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metric_service.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + ], + next_page_token="abc", + ), + metric_service.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[], next_page_token="def", + ), + metric_service.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + ], + next_page_token="ghi", + ), + metric_service.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_monitored_resource_descriptors(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, monitored_resource.MonitoredResourceDescriptor) + for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_monitored_resource_descriptors_async_pages(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_monitored_resource_descriptors), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metric_service.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + ], + next_page_token="abc", + ), + metric_service.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[], next_page_token="def", + ), + metric_service.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + ], + next_page_token="ghi", + ), + metric_service.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in ( + await client.list_monitored_resource_descriptors(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_monitored_resource_descriptor( + transport: str = "grpc", + request_type=metric_service.GetMonitoredResourceDescriptorRequest, +): + client = MetricServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_monitored_resource_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = monitored_resource.MonitoredResourceDescriptor( + name="name_value", + type="type_value", + display_name="display_name_value", + description="description_value", + ) + + response = client.get_monitored_resource_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metric_service.GetMonitoredResourceDescriptorRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, monitored_resource.MonitoredResourceDescriptor) + + assert response.name == "name_value" + + assert response.type == "type_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + +def test_get_monitored_resource_descriptor_from_dict(): + test_get_monitored_resource_descriptor(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_monitored_resource_descriptor_async(transport: str = "grpc_asyncio"): + client = MetricServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = metric_service.GetMonitoredResourceDescriptorRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_monitored_resource_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + monitored_resource.MonitoredResourceDescriptor( + name="name_value", + type="type_value", + display_name="display_name_value", + description="description_value", + ) + ) + + response = await client.get_monitored_resource_descriptor(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, monitored_resource.MonitoredResourceDescriptor) + + assert response.name == "name_value" + + assert response.type == "type_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + +def test_get_monitored_resource_descriptor_field_headers(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metric_service.GetMonitoredResourceDescriptorRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_monitored_resource_descriptor), "__call__" + ) as call: + call.return_value = monitored_resource.MonitoredResourceDescriptor() + + client.get_monitored_resource_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_monitored_resource_descriptor_field_headers_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metric_service.GetMonitoredResourceDescriptorRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.get_monitored_resource_descriptor), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + monitored_resource.MonitoredResourceDescriptor() + ) + + await client.get_monitored_resource_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_monitored_resource_descriptor_flattened(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_monitored_resource_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = monitored_resource.MonitoredResourceDescriptor() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_monitored_resource_descriptor(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_monitored_resource_descriptor_flattened_error(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_monitored_resource_descriptor( + metric_service.GetMonitoredResourceDescriptorRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_monitored_resource_descriptor_flattened_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_monitored_resource_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = monitored_resource.MonitoredResourceDescriptor() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + monitored_resource.MonitoredResourceDescriptor() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_monitored_resource_descriptor(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_monitored_resource_descriptor_flattened_error_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_monitored_resource_descriptor( + metric_service.GetMonitoredResourceDescriptorRequest(), name="name_value", + ) + + +def test_list_metric_descriptors( + transport: str = "grpc", request_type=metric_service.ListMetricDescriptorsRequest +): + client = MetricServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_metric_descriptors), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metric_service.ListMetricDescriptorsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_metric_descriptors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metric_service.ListMetricDescriptorsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMetricDescriptorsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_metric_descriptors_from_dict(): + test_list_metric_descriptors(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_metric_descriptors_async(transport: str = "grpc_asyncio"): + client = MetricServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = metric_service.ListMetricDescriptorsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_metric_descriptors), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metric_service.ListMetricDescriptorsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_metric_descriptors(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMetricDescriptorsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_metric_descriptors_field_headers(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metric_service.ListMetricDescriptorsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_metric_descriptors), "__call__" + ) as call: + call.return_value = metric_service.ListMetricDescriptorsResponse() + + client.list_metric_descriptors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_metric_descriptors_field_headers_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metric_service.ListMetricDescriptorsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.list_metric_descriptors), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metric_service.ListMetricDescriptorsResponse() + ) + + await client.list_metric_descriptors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_list_metric_descriptors_flattened(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_metric_descriptors), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metric_service.ListMetricDescriptorsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_metric_descriptors(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_list_metric_descriptors_flattened_error(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_metric_descriptors( + metric_service.ListMetricDescriptorsRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_list_metric_descriptors_flattened_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.list_metric_descriptors), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metric_service.ListMetricDescriptorsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metric_service.ListMetricDescriptorsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_metric_descriptors(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_list_metric_descriptors_flattened_error_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_metric_descriptors( + metric_service.ListMetricDescriptorsRequest(), name="name_value", + ) + + +def test_list_metric_descriptors_pager(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_metric_descriptors), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metric_service.ListMetricDescriptorsResponse( + metric_descriptors=[ + ga_metric.MetricDescriptor(), + ga_metric.MetricDescriptor(), + ga_metric.MetricDescriptor(), + ], + next_page_token="abc", + ), + metric_service.ListMetricDescriptorsResponse( + metric_descriptors=[], next_page_token="def", + ), + metric_service.ListMetricDescriptorsResponse( + metric_descriptors=[ga_metric.MetricDescriptor(),], + next_page_token="ghi", + ), + metric_service.ListMetricDescriptorsResponse( + metric_descriptors=[ + ga_metric.MetricDescriptor(), + ga_metric.MetricDescriptor(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", ""),)), + ) + pager = client.list_metric_descriptors(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, ga_metric.MetricDescriptor) for i in results) + + +def test_list_metric_descriptors_pages(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_metric_descriptors), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metric_service.ListMetricDescriptorsResponse( + metric_descriptors=[ + ga_metric.MetricDescriptor(), + ga_metric.MetricDescriptor(), + ga_metric.MetricDescriptor(), + ], + next_page_token="abc", + ), + metric_service.ListMetricDescriptorsResponse( + metric_descriptors=[], next_page_token="def", + ), + metric_service.ListMetricDescriptorsResponse( + metric_descriptors=[ga_metric.MetricDescriptor(),], + next_page_token="ghi", + ), + metric_service.ListMetricDescriptorsResponse( + metric_descriptors=[ + ga_metric.MetricDescriptor(), + ga_metric.MetricDescriptor(), + ], + ), + RuntimeError, + ) + pages = list(client.list_metric_descriptors(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_metric_descriptors_async_pager(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_metric_descriptors), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metric_service.ListMetricDescriptorsResponse( + metric_descriptors=[ + ga_metric.MetricDescriptor(), + ga_metric.MetricDescriptor(), + ga_metric.MetricDescriptor(), + ], + next_page_token="abc", + ), + metric_service.ListMetricDescriptorsResponse( + metric_descriptors=[], next_page_token="def", + ), + metric_service.ListMetricDescriptorsResponse( + metric_descriptors=[ga_metric.MetricDescriptor(),], + next_page_token="ghi", + ), + metric_service.ListMetricDescriptorsResponse( + metric_descriptors=[ + ga_metric.MetricDescriptor(), + ga_metric.MetricDescriptor(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_metric_descriptors(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, ga_metric.MetricDescriptor) for i in responses) + + +@pytest.mark.asyncio +async def test_list_metric_descriptors_async_pages(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_metric_descriptors), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metric_service.ListMetricDescriptorsResponse( + metric_descriptors=[ + ga_metric.MetricDescriptor(), + ga_metric.MetricDescriptor(), + ga_metric.MetricDescriptor(), + ], + next_page_token="abc", + ), + metric_service.ListMetricDescriptorsResponse( + metric_descriptors=[], next_page_token="def", + ), + metric_service.ListMetricDescriptorsResponse( + metric_descriptors=[ga_metric.MetricDescriptor(),], + next_page_token="ghi", + ), + metric_service.ListMetricDescriptorsResponse( + metric_descriptors=[ + ga_metric.MetricDescriptor(), + ga_metric.MetricDescriptor(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_metric_descriptors(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_metric_descriptor( + transport: str = "grpc", request_type=metric_service.GetMetricDescriptorRequest +): + client = MetricServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_metric_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ga_metric.MetricDescriptor( + name="name_value", + type="type_value", + metric_kind=ga_metric.MetricDescriptor.MetricKind.GAUGE, + value_type=ga_metric.MetricDescriptor.ValueType.BOOL, + unit="unit_value", + description="description_value", + display_name="display_name_value", + ) + + response = client.get_metric_descriptor(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metric_service.GetMetricDescriptorRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, ga_metric.MetricDescriptor) + + assert response.name == "name_value" + + assert response.type == "type_value" + + assert response.metric_kind == ga_metric.MetricDescriptor.MetricKind.GAUGE + + assert response.value_type == ga_metric.MetricDescriptor.ValueType.BOOL + + assert response.unit == "unit_value" + + assert response.description == "description_value" + + assert response.display_name == "display_name_value" + + +def test_get_metric_descriptor_from_dict(): + test_get_metric_descriptor(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_metric_descriptor_async(transport: str = "grpc_asyncio"): + client = MetricServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = metric_service.GetMetricDescriptorRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_metric_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + ga_metric.MetricDescriptor( + name="name_value", + type="type_value", + metric_kind=ga_metric.MetricDescriptor.MetricKind.GAUGE, + value_type=ga_metric.MetricDescriptor.ValueType.BOOL, + unit="unit_value", + description="description_value", + display_name="display_name_value", + ) + ) + + response = await client.get_metric_descriptor(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, ga_metric.MetricDescriptor) + + assert response.name == "name_value" + + assert response.type == "type_value" + + assert response.metric_kind == ga_metric.MetricDescriptor.MetricKind.GAUGE + + assert response.value_type == ga_metric.MetricDescriptor.ValueType.BOOL + + assert response.unit == "unit_value" + + assert response.description == "description_value" + + assert response.display_name == "display_name_value" + + +def test_get_metric_descriptor_field_headers(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metric_service.GetMetricDescriptorRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_metric_descriptor), "__call__" + ) as call: + call.return_value = ga_metric.MetricDescriptor() + + client.get_metric_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_metric_descriptor_field_headers_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metric_service.GetMetricDescriptorRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.get_metric_descriptor), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + ga_metric.MetricDescriptor() + ) + + await client.get_metric_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_metric_descriptor_flattened(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_metric_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ga_metric.MetricDescriptor() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_metric_descriptor(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_metric_descriptor_flattened_error(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_metric_descriptor( + metric_service.GetMetricDescriptorRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_metric_descriptor_flattened_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.get_metric_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ga_metric.MetricDescriptor() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + ga_metric.MetricDescriptor() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_metric_descriptor(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_metric_descriptor_flattened_error_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_metric_descriptor( + metric_service.GetMetricDescriptorRequest(), name="name_value", + ) + + +def test_create_metric_descriptor( + transport: str = "grpc", request_type=metric_service.CreateMetricDescriptorRequest +): + client = MetricServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_metric_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = ga_metric.MetricDescriptor( + name="name_value", + type="type_value", + metric_kind=ga_metric.MetricDescriptor.MetricKind.GAUGE, + value_type=ga_metric.MetricDescriptor.ValueType.BOOL, + unit="unit_value", + description="description_value", + display_name="display_name_value", + ) + + response = client.create_metric_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metric_service.CreateMetricDescriptorRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, ga_metric.MetricDescriptor) + + assert response.name == "name_value" + + assert response.type == "type_value" + + assert response.metric_kind == ga_metric.MetricDescriptor.MetricKind.GAUGE + + assert response.value_type == ga_metric.MetricDescriptor.ValueType.BOOL + + assert response.unit == "unit_value" + + assert response.description == "description_value" + + assert response.display_name == "display_name_value" + + +def test_create_metric_descriptor_from_dict(): + test_create_metric_descriptor(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_metric_descriptor_async(transport: str = "grpc_asyncio"): + client = MetricServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = metric_service.CreateMetricDescriptorRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_metric_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + ga_metric.MetricDescriptor( + name="name_value", + type="type_value", + metric_kind=ga_metric.MetricDescriptor.MetricKind.GAUGE, + value_type=ga_metric.MetricDescriptor.ValueType.BOOL, + unit="unit_value", + description="description_value", + display_name="display_name_value", + ) + ) + + response = await client.create_metric_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, ga_metric.MetricDescriptor) + + assert response.name == "name_value" + + assert response.type == "type_value" + + assert response.metric_kind == ga_metric.MetricDescriptor.MetricKind.GAUGE + + assert response.value_type == ga_metric.MetricDescriptor.ValueType.BOOL + + assert response.unit == "unit_value" + + assert response.description == "description_value" + + assert response.display_name == "display_name_value" + + +def test_create_metric_descriptor_field_headers(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metric_service.CreateMetricDescriptorRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_metric_descriptor), "__call__" + ) as call: + call.return_value = ga_metric.MetricDescriptor() + + client.create_metric_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_metric_descriptor_field_headers_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metric_service.CreateMetricDescriptorRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_metric_descriptor), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + ga_metric.MetricDescriptor() + ) + + await client.create_metric_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_create_metric_descriptor_flattened(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_metric_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ga_metric.MetricDescriptor() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_metric_descriptor( + name="name_value", + metric_descriptor=ga_metric.MetricDescriptor(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].metric_descriptor == ga_metric.MetricDescriptor( + name="name_value" + ) + + +def test_create_metric_descriptor_flattened_error(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_metric_descriptor( + metric_service.CreateMetricDescriptorRequest(), + name="name_value", + metric_descriptor=ga_metric.MetricDescriptor(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_metric_descriptor_flattened_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_metric_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ga_metric.MetricDescriptor() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + ga_metric.MetricDescriptor() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_metric_descriptor( + name="name_value", + metric_descriptor=ga_metric.MetricDescriptor(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].metric_descriptor == ga_metric.MetricDescriptor( + name="name_value" + ) + + +@pytest.mark.asyncio +async def test_create_metric_descriptor_flattened_error_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_metric_descriptor( + metric_service.CreateMetricDescriptorRequest(), + name="name_value", + metric_descriptor=ga_metric.MetricDescriptor(name="name_value"), + ) + + +def test_delete_metric_descriptor( + transport: str = "grpc", request_type=metric_service.DeleteMetricDescriptorRequest +): + client = MetricServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_metric_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_metric_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metric_service.DeleteMetricDescriptorRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_metric_descriptor_from_dict(): + test_delete_metric_descriptor(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_metric_descriptor_async(transport: str = "grpc_asyncio"): + client = MetricServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = metric_service.DeleteMetricDescriptorRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_metric_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_metric_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_metric_descriptor_field_headers(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metric_service.DeleteMetricDescriptorRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_metric_descriptor), "__call__" + ) as call: + call.return_value = None + + client.delete_metric_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_metric_descriptor_field_headers_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metric_service.DeleteMetricDescriptorRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_metric_descriptor), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_metric_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_metric_descriptor_flattened(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_metric_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_metric_descriptor(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_metric_descriptor_flattened_error(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_metric_descriptor( + metric_service.DeleteMetricDescriptorRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_metric_descriptor_flattened_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_metric_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_metric_descriptor(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_metric_descriptor_flattened_error_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_metric_descriptor( + metric_service.DeleteMetricDescriptorRequest(), name="name_value", + ) + + +def test_list_time_series( + transport: str = "grpc", request_type=metric_service.ListTimeSeriesRequest +): + client = MetricServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metric_service.ListTimeSeriesResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metric_service.ListTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTimeSeriesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_time_series_from_dict(): + test_list_time_series(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_time_series_async(transport: str = "grpc_asyncio"): + client = MetricServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = metric_service.ListTimeSeriesRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.list_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metric_service.ListTimeSeriesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTimeSeriesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_time_series_field_headers(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metric_service.ListTimeSeriesRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_time_series), "__call__" + ) as call: + call.return_value = metric_service.ListTimeSeriesResponse() + + client.list_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_time_series_field_headers_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metric_service.ListTimeSeriesRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_time_series), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metric_service.ListTimeSeriesResponse() + ) + + await client.list_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_list_time_series_flattened(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metric_service.ListTimeSeriesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_time_series( + name="name_value", + filter="filter_value", + interval=common.TimeInterval(end_time=timestamp.Timestamp(seconds=751)), + view=metric_service.ListTimeSeriesRequest.TimeSeriesView.HEADERS, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].filter == "filter_value" + + assert args[0].interval == common.TimeInterval( + end_time=timestamp.Timestamp(seconds=751) + ) + + assert ( + args[0].view == metric_service.ListTimeSeriesRequest.TimeSeriesView.HEADERS + ) + + +def test_list_time_series_flattened_error(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_time_series( + metric_service.ListTimeSeriesRequest(), + name="name_value", + filter="filter_value", + interval=common.TimeInterval(end_time=timestamp.Timestamp(seconds=751)), + view=metric_service.ListTimeSeriesRequest.TimeSeriesView.HEADERS, + ) + + +@pytest.mark.asyncio +async def test_list_time_series_flattened_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metric_service.ListTimeSeriesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metric_service.ListTimeSeriesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_time_series( + name="name_value", + filter="filter_value", + interval=common.TimeInterval(end_time=timestamp.Timestamp(seconds=751)), + view=metric_service.ListTimeSeriesRequest.TimeSeriesView.HEADERS, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].filter == "filter_value" + + assert args[0].interval == common.TimeInterval( + end_time=timestamp.Timestamp(seconds=751) + ) + + assert ( + args[0].view == metric_service.ListTimeSeriesRequest.TimeSeriesView.HEADERS + ) + + +@pytest.mark.asyncio +async def test_list_time_series_flattened_error_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_time_series( + metric_service.ListTimeSeriesRequest(), + name="name_value", + filter="filter_value", + interval=common.TimeInterval(end_time=timestamp.Timestamp(seconds=751)), + view=metric_service.ListTimeSeriesRequest.TimeSeriesView.HEADERS, + ) + + +def test_list_time_series_pager(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_time_series), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metric_service.ListTimeSeriesResponse( + time_series=[ + gm_metric.TimeSeries(), + gm_metric.TimeSeries(), + gm_metric.TimeSeries(), + ], + next_page_token="abc", + ), + metric_service.ListTimeSeriesResponse( + time_series=[], next_page_token="def", + ), + metric_service.ListTimeSeriesResponse( + time_series=[gm_metric.TimeSeries(),], next_page_token="ghi", + ), + metric_service.ListTimeSeriesResponse( + time_series=[gm_metric.TimeSeries(), gm_metric.TimeSeries(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", ""),)), + ) + pager = client.list_time_series(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, gm_metric.TimeSeries) for i in results) + + +def test_list_time_series_pages(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_time_series), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metric_service.ListTimeSeriesResponse( + time_series=[ + gm_metric.TimeSeries(), + gm_metric.TimeSeries(), + gm_metric.TimeSeries(), + ], + next_page_token="abc", + ), + metric_service.ListTimeSeriesResponse( + time_series=[], next_page_token="def", + ), + metric_service.ListTimeSeriesResponse( + time_series=[gm_metric.TimeSeries(),], next_page_token="ghi", + ), + metric_service.ListTimeSeriesResponse( + time_series=[gm_metric.TimeSeries(), gm_metric.TimeSeries(),], + ), + RuntimeError, + ) + pages = list(client.list_time_series(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_time_series_async_pager(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_time_series), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metric_service.ListTimeSeriesResponse( + time_series=[ + gm_metric.TimeSeries(), + gm_metric.TimeSeries(), + gm_metric.TimeSeries(), + ], + next_page_token="abc", + ), + metric_service.ListTimeSeriesResponse( + time_series=[], next_page_token="def", + ), + metric_service.ListTimeSeriesResponse( + time_series=[gm_metric.TimeSeries(),], next_page_token="ghi", + ), + metric_service.ListTimeSeriesResponse( + time_series=[gm_metric.TimeSeries(), gm_metric.TimeSeries(),], + ), + RuntimeError, + ) + async_pager = await client.list_time_series(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, gm_metric.TimeSeries) for i in responses) + + +@pytest.mark.asyncio +async def test_list_time_series_async_pages(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_time_series), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metric_service.ListTimeSeriesResponse( + time_series=[ + gm_metric.TimeSeries(), + gm_metric.TimeSeries(), + gm_metric.TimeSeries(), + ], + next_page_token="abc", + ), + metric_service.ListTimeSeriesResponse( + time_series=[], next_page_token="def", + ), + metric_service.ListTimeSeriesResponse( + time_series=[gm_metric.TimeSeries(),], next_page_token="ghi", + ), + metric_service.ListTimeSeriesResponse( + time_series=[gm_metric.TimeSeries(), gm_metric.TimeSeries(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_time_series(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_create_time_series( + transport: str = "grpc", request_type=metric_service.CreateTimeSeriesRequest +): + client = MetricServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.create_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metric_service.CreateTimeSeriesRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_create_time_series_from_dict(): + test_create_time_series(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_time_series_async(transport: str = "grpc_asyncio"): + client = MetricServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = metric_service.CreateTimeSeriesRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.create_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_create_time_series_field_headers(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metric_service.CreateTimeSeriesRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_time_series), "__call__" + ) as call: + call.return_value = None + + client.create_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_time_series_field_headers_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metric_service.CreateTimeSeriesRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_time_series), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.create_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_create_time_series_flattened(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_time_series( + name="name_value", + time_series=[ + gm_metric.TimeSeries(metric=ga_metric.Metric(type="type_value")) + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].time_series == [ + gm_metric.TimeSeries(metric=ga_metric.Metric(type="type_value")) + ] + + +def test_create_time_series_flattened_error(): + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_time_series( + metric_service.CreateTimeSeriesRequest(), + name="name_value", + time_series=[ + gm_metric.TimeSeries(metric=ga_metric.Metric(type="type_value")) + ], + ) + + +@pytest.mark.asyncio +async def test_create_time_series_flattened_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_time_series( + name="name_value", + time_series=[ + gm_metric.TimeSeries(metric=ga_metric.Metric(type="type_value")) + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].time_series == [ + gm_metric.TimeSeries(metric=ga_metric.Metric(type="type_value")) + ] + + +@pytest.mark.asyncio +async def test_create_time_series_flattened_error_async(): + client = MetricServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_time_series( + metric_service.CreateTimeSeriesRequest(), + name="name_value", + time_series=[ + gm_metric.TimeSeries(metric=ga_metric.Metric(type="type_value")) + ], + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.MetricServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetricServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.MetricServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetricServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.MetricServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetricServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.MetricServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = MetricServiceClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.MetricServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.MetricServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetricServiceGrpcTransport, + transports.MetricServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = MetricServiceClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.MetricServiceGrpcTransport,) + + +def test_metric_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.MetricServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_metric_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch( + "google.cloud.monitoring_v3.services.metric_service.transports.MetricServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.MetricServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "list_monitored_resource_descriptors", + "get_monitored_resource_descriptor", + "list_metric_descriptors", + "get_metric_descriptor", + "create_metric_descriptor", + "delete_metric_descriptor", + "list_time_series", + "create_time_series", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_metric_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.monitoring_v3.services.metric_service.transports.MetricServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.MetricServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + ), + quota_project_id="octopus", + ) + + +def test_metric_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.monitoring_v3.services.metric_service.transports.MetricServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.MetricServiceTransport() + adc.assert_called_once() + + +def test_metric_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + MetricServiceClient() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + ), + quota_project_id=None, + ) + + +def test_metric_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.MetricServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + ), + quota_project_id="octopus", + ) + + +def test_metric_service_host_no_port(): + client = MetricServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="monitoring.googleapis.com" + ), + ) + assert client._transport._host == "monitoring.googleapis.com:443" + + +def test_metric_service_host_with_port(): + client = MetricServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="monitoring.googleapis.com:8000" + ), + ) + assert client._transport._host == "monitoring.googleapis.com:8000" + + +def test_metric_service_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.MetricServiceGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +def test_metric_service_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that channel is used if provided. 
+ transport = transports.MetricServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetricServiceGrpcTransport, + transports.MetricServiceGrpcAsyncIOTransport, + ], +) +def test_metric_service_transport_channel_mtls_with_client_cert_source(transport_class): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetricServiceGrpcTransport, + transports.MetricServiceGrpcAsyncIOTransport, + ], +) +def test_metric_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = 
mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_common_project_path(): + project = "squid" + + expected = "projects/{project}".format(project=project,) + actual = MetricServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = MetricServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetricServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "whelk" + + expected = "organizations/{organization}".format(organization=organization,) + actual = MetricServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "octopus", + } + path = MetricServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = MetricServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "oyster" + + expected = "folders/{folder}".format(folder=folder,) + actual = MetricServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nudibranch", + } + path = MetricServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = MetricServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = MetricServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = MetricServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetricServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = MetricServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = MetricServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = MetricServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.MetricServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = MetricServiceClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.MetricServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = MetricServiceClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/monitoring_v3/test_notification_channel_service.py b/tests/unit/gapic/monitoring_v3/test_notification_channel_service.py new file mode 100644 index 00000000..8ed40308 --- /dev/null +++ b/tests/unit/gapic/monitoring_v3/test_notification_channel_service.py @@ -0,0 +1,3696 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api import label_pb2 as label # type: ignore +from google.api import launch_stage_pb2 as launch_stage # type: ignore +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.monitoring_v3.services.notification_channel_service import ( + NotificationChannelServiceAsyncClient, +) +from google.cloud.monitoring_v3.services.notification_channel_service import ( + NotificationChannelServiceClient, +) +from google.cloud.monitoring_v3.services.notification_channel_service import pagers +from google.cloud.monitoring_v3.services.notification_channel_service import transports +from google.cloud.monitoring_v3.types import common +from google.cloud.monitoring_v3.types import notification +from google.cloud.monitoring_v3.types import notification_service +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.protobuf import wrappers_pb2 as wrappers # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If 
default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert NotificationChannelServiceClient._get_default_mtls_endpoint(None) is None + assert ( + NotificationChannelServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + NotificationChannelServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + NotificationChannelServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + NotificationChannelServiceClient._get_default_mtls_endpoint( + sandbox_mtls_endpoint + ) + == sandbox_mtls_endpoint + ) + assert ( + NotificationChannelServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", + [NotificationChannelServiceClient, NotificationChannelServiceAsyncClient], +) +def test_notification_channel_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert 
client._transport._host == "monitoring.googleapis.com:443" + + +def test_notification_channel_service_client_get_transport_class(): + transport = NotificationChannelServiceClient.get_transport_class() + assert transport == transports.NotificationChannelServiceGrpcTransport + + transport = NotificationChannelServiceClient.get_transport_class("grpc") + assert transport == transports.NotificationChannelServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + NotificationChannelServiceClient, + transports.NotificationChannelServiceGrpcTransport, + "grpc", + ), + ( + NotificationChannelServiceAsyncClient, + transports.NotificationChannelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + NotificationChannelServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(NotificationChannelServiceClient), +) +@mock.patch.object( + NotificationChannelServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(NotificationChannelServiceAsyncClient), +) +def test_notification_channel_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object( + NotificationChannelServiceClient, "get_transport_class" + ) as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object( + NotificationChannelServiceClient, "get_transport_class" + ) as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + NotificationChannelServiceClient, + transports.NotificationChannelServiceGrpcTransport, + "grpc", + "true", + ), + ( + NotificationChannelServiceAsyncClient, + transports.NotificationChannelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + NotificationChannelServiceClient, + transports.NotificationChannelServiceGrpcTransport, + "grpc", + "false", + ), + ( + NotificationChannelServiceAsyncClient, + transports.NotificationChannelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + NotificationChannelServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(NotificationChannelServiceClient), +) +@mock.patch.object( + NotificationChannelServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(NotificationChannelServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_notification_channel_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. 
+ + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + NotificationChannelServiceClient, + transports.NotificationChannelServiceGrpcTransport, + "grpc", + ), + ( + NotificationChannelServiceAsyncClient, + transports.NotificationChannelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_notification_channel_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + NotificationChannelServiceClient, + transports.NotificationChannelServiceGrpcTransport, + "grpc", + ), + ( + NotificationChannelServiceAsyncClient, + transports.NotificationChannelServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_notification_channel_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_notification_channel_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.monitoring_v3.services.notification_channel_service.transports.NotificationChannelServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = NotificationChannelServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + 
quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_list_notification_channel_descriptors( + transport: str = "grpc", + request_type=notification_service.ListNotificationChannelDescriptorsRequest, +): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_notification_channel_descriptors), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification_service.ListNotificationChannelDescriptorsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_notification_channel_descriptors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert ( + args[0] == notification_service.ListNotificationChannelDescriptorsRequest() + ) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListNotificationChannelDescriptorsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_notification_channel_descriptors_from_dict(): + test_list_notification_channel_descriptors(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_notification_channel_descriptors_async( + transport: str = "grpc_asyncio", +): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = notification_service.ListNotificationChannelDescriptorsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_notification_channel_descriptors), + "__call__", + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification_service.ListNotificationChannelDescriptorsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_notification_channel_descriptors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListNotificationChannelDescriptorsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_notification_channel_descriptors_field_headers(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.ListNotificationChannelDescriptorsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_notification_channel_descriptors), "__call__" + ) as call: + call.return_value = ( + notification_service.ListNotificationChannelDescriptorsResponse() + ) + + client.list_notification_channel_descriptors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_notification_channel_descriptors_field_headers_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.ListNotificationChannelDescriptorsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_notification_channel_descriptors), + "__call__", + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification_service.ListNotificationChannelDescriptorsResponse() + ) + + await client.list_notification_channel_descriptors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_list_notification_channel_descriptors_flattened(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_notification_channel_descriptors), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + notification_service.ListNotificationChannelDescriptorsResponse() + ) + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.list_notification_channel_descriptors(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_list_notification_channel_descriptors_flattened_error(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_notification_channel_descriptors( + notification_service.ListNotificationChannelDescriptorsRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_list_notification_channel_descriptors_flattened_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_notification_channel_descriptors), + "__call__", + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + notification_service.ListNotificationChannelDescriptorsResponse() + ) + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification_service.ListNotificationChannelDescriptorsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_notification_channel_descriptors( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_list_notification_channel_descriptors_flattened_error_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_notification_channel_descriptors( + notification_service.ListNotificationChannelDescriptorsRequest(), + name="name_value", + ) + + +def test_list_notification_channel_descriptors_pager(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_notification_channel_descriptors), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + notification_service.ListNotificationChannelDescriptorsResponse( + channel_descriptors=[ + notification.NotificationChannelDescriptor(), + notification.NotificationChannelDescriptor(), + notification.NotificationChannelDescriptor(), + ], + next_page_token="abc", + ), + notification_service.ListNotificationChannelDescriptorsResponse( + channel_descriptors=[], next_page_token="def", + ), + notification_service.ListNotificationChannelDescriptorsResponse( + channel_descriptors=[notification.NotificationChannelDescriptor(),], + next_page_token="ghi", + ), + notification_service.ListNotificationChannelDescriptorsResponse( + channel_descriptors=[ + notification.NotificationChannelDescriptor(), + notification.NotificationChannelDescriptor(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", ""),)), + ) + pager = client.list_notification_channel_descriptors(request={}) + + assert pager._metadata == 
metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all( + isinstance(i, notification.NotificationChannelDescriptor) for i in results + ) + + +def test_list_notification_channel_descriptors_pages(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_notification_channel_descriptors), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + notification_service.ListNotificationChannelDescriptorsResponse( + channel_descriptors=[ + notification.NotificationChannelDescriptor(), + notification.NotificationChannelDescriptor(), + notification.NotificationChannelDescriptor(), + ], + next_page_token="abc", + ), + notification_service.ListNotificationChannelDescriptorsResponse( + channel_descriptors=[], next_page_token="def", + ), + notification_service.ListNotificationChannelDescriptorsResponse( + channel_descriptors=[notification.NotificationChannelDescriptor(),], + next_page_token="ghi", + ), + notification_service.ListNotificationChannelDescriptorsResponse( + channel_descriptors=[ + notification.NotificationChannelDescriptor(), + notification.NotificationChannelDescriptor(), + ], + ), + RuntimeError, + ) + pages = list(client.list_notification_channel_descriptors(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_notification_channel_descriptors_async_pager(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.list_notification_channel_descriptors), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + notification_service.ListNotificationChannelDescriptorsResponse( + channel_descriptors=[ + notification.NotificationChannelDescriptor(), + notification.NotificationChannelDescriptor(), + notification.NotificationChannelDescriptor(), + ], + next_page_token="abc", + ), + notification_service.ListNotificationChannelDescriptorsResponse( + channel_descriptors=[], next_page_token="def", + ), + notification_service.ListNotificationChannelDescriptorsResponse( + channel_descriptors=[notification.NotificationChannelDescriptor(),], + next_page_token="ghi", + ), + notification_service.ListNotificationChannelDescriptorsResponse( + channel_descriptors=[ + notification.NotificationChannelDescriptor(), + notification.NotificationChannelDescriptor(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_notification_channel_descriptors(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, notification.NotificationChannelDescriptor) for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_notification_channel_descriptors_async_pages(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_notification_channel_descriptors), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notification_service.ListNotificationChannelDescriptorsResponse( + channel_descriptors=[ + notification.NotificationChannelDescriptor(), + notification.NotificationChannelDescriptor(), + notification.NotificationChannelDescriptor(), + ], + next_page_token="abc", + ), + notification_service.ListNotificationChannelDescriptorsResponse( + channel_descriptors=[], next_page_token="def", + ), + notification_service.ListNotificationChannelDescriptorsResponse( + channel_descriptors=[notification.NotificationChannelDescriptor(),], + next_page_token="ghi", + ), + notification_service.ListNotificationChannelDescriptorsResponse( + channel_descriptors=[ + notification.NotificationChannelDescriptor(), + notification.NotificationChannelDescriptor(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in ( + await client.list_notification_channel_descriptors(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_notification_channel_descriptor( + transport: str = "grpc", + request_type=notification_service.GetNotificationChannelDescriptorRequest, +): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_notification_channel_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = notification.NotificationChannelDescriptor( + name="name_value", + type_="type__value", + display_name="display_name_value", + description="description_value", + supported_tiers=[common.ServiceTier.SERVICE_TIER_BASIC], + ) + + response = client.get_notification_channel_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == notification_service.GetNotificationChannelDescriptorRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, notification.NotificationChannelDescriptor) + + assert response.name == "name_value" + + assert response.type_ == "type__value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.supported_tiers == [common.ServiceTier.SERVICE_TIER_BASIC] + + +def test_get_notification_channel_descriptor_from_dict(): + test_get_notification_channel_descriptor(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_notification_channel_descriptor_async( + transport: str = "grpc_asyncio", +): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = notification_service.GetNotificationChannelDescriptorRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_notification_channel_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification.NotificationChannelDescriptor( + name="name_value", + type_="type__value", + display_name="display_name_value", + description="description_value", + supported_tiers=[common.ServiceTier.SERVICE_TIER_BASIC], + ) + ) + + response = await client.get_notification_channel_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, notification.NotificationChannelDescriptor) + + assert response.name == "name_value" + + assert response.type_ == "type__value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.supported_tiers == [common.ServiceTier.SERVICE_TIER_BASIC] + + +def test_get_notification_channel_descriptor_field_headers(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.GetNotificationChannelDescriptorRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_notification_channel_descriptor), "__call__" + ) as call: + call.return_value = notification.NotificationChannelDescriptor() + + client.get_notification_channel_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_notification_channel_descriptor_field_headers_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.GetNotificationChannelDescriptorRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_notification_channel_descriptor), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification.NotificationChannelDescriptor() + ) + + await client.get_notification_channel_descriptor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_notification_channel_descriptor_flattened(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_notification_channel_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification.NotificationChannelDescriptor() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_notification_channel_descriptor(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_notification_channel_descriptor_flattened_error(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_notification_channel_descriptor( + notification_service.GetNotificationChannelDescriptorRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_notification_channel_descriptor_flattened_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_notification_channel_descriptor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification.NotificationChannelDescriptor() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification.NotificationChannelDescriptor() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_notification_channel_descriptor(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_notification_channel_descriptor_flattened_error_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_notification_channel_descriptor( + notification_service.GetNotificationChannelDescriptorRequest(), + name="name_value", + ) + + +def test_list_notification_channels( + transport: str = "grpc", + request_type=notification_service.ListNotificationChannelsRequest, +): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_notification_channels), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification_service.ListNotificationChannelsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_notification_channels(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == notification_service.ListNotificationChannelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListNotificationChannelsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_notification_channels_from_dict(): + test_list_notification_channels(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_notification_channels_async(transport: str = "grpc_asyncio"): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = notification_service.ListNotificationChannelsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_notification_channels), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification_service.ListNotificationChannelsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_notification_channels(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListNotificationChannelsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_notification_channels_field_headers(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.ListNotificationChannelsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_notification_channels), "__call__" + ) as call: + call.return_value = notification_service.ListNotificationChannelsResponse() + + client.list_notification_channels(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_notification_channels_field_headers_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.ListNotificationChannelsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_notification_channels), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification_service.ListNotificationChannelsResponse() + ) + + await client.list_notification_channels(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_list_notification_channels_flattened(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_notification_channels), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification_service.ListNotificationChannelsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_notification_channels(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_list_notification_channels_flattened_error(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_notification_channels( + notification_service.ListNotificationChannelsRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_list_notification_channels_flattened_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_notification_channels), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification_service.ListNotificationChannelsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification_service.ListNotificationChannelsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_notification_channels(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_list_notification_channels_flattened_error_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_notification_channels( + notification_service.ListNotificationChannelsRequest(), name="name_value", + ) + + +def test_list_notification_channels_pager(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_notification_channels), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + notification_service.ListNotificationChannelsResponse( + notification_channels=[ + notification.NotificationChannel(), + notification.NotificationChannel(), + notification.NotificationChannel(), + ], + next_page_token="abc", + ), + notification_service.ListNotificationChannelsResponse( + notification_channels=[], next_page_token="def", + ), + notification_service.ListNotificationChannelsResponse( + notification_channels=[notification.NotificationChannel(),], + next_page_token="ghi", + ), + notification_service.ListNotificationChannelsResponse( + notification_channels=[ + notification.NotificationChannel(), + notification.NotificationChannel(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", ""),)), + ) + pager = client.list_notification_channels(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, notification.NotificationChannel) for i in results) + + +def test_list_notification_channels_pages(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_notification_channels), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notification_service.ListNotificationChannelsResponse( + notification_channels=[ + notification.NotificationChannel(), + notification.NotificationChannel(), + notification.NotificationChannel(), + ], + next_page_token="abc", + ), + notification_service.ListNotificationChannelsResponse( + notification_channels=[], next_page_token="def", + ), + notification_service.ListNotificationChannelsResponse( + notification_channels=[notification.NotificationChannel(),], + next_page_token="ghi", + ), + notification_service.ListNotificationChannelsResponse( + notification_channels=[ + notification.NotificationChannel(), + notification.NotificationChannel(), + ], + ), + RuntimeError, + ) + pages = list(client.list_notification_channels(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_notification_channels_async_pager(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_notification_channels), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notification_service.ListNotificationChannelsResponse( + notification_channels=[ + notification.NotificationChannel(), + notification.NotificationChannel(), + notification.NotificationChannel(), + ], + next_page_token="abc", + ), + notification_service.ListNotificationChannelsResponse( + notification_channels=[], next_page_token="def", + ), + notification_service.ListNotificationChannelsResponse( + notification_channels=[notification.NotificationChannel(),], + next_page_token="ghi", + ), + notification_service.ListNotificationChannelsResponse( + notification_channels=[ + notification.NotificationChannel(), + notification.NotificationChannel(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_notification_channels(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, notification.NotificationChannel) for i in responses) + + +@pytest.mark.asyncio +async def test_list_notification_channels_async_pages(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_notification_channels), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notification_service.ListNotificationChannelsResponse( + notification_channels=[ + notification.NotificationChannel(), + notification.NotificationChannel(), + notification.NotificationChannel(), + ], + next_page_token="abc", + ), + notification_service.ListNotificationChannelsResponse( + notification_channels=[], next_page_token="def", + ), + notification_service.ListNotificationChannelsResponse( + notification_channels=[notification.NotificationChannel(),], + next_page_token="ghi", + ), + notification_service.ListNotificationChannelsResponse( + notification_channels=[ + notification.NotificationChannel(), + notification.NotificationChannel(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_notification_channels(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_notification_channel( + transport: str = "grpc", + request_type=notification_service.GetNotificationChannelRequest, +): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification.NotificationChannel( + type_="type__value", + name="name_value", + display_name="display_name_value", + description="description_value", + verification_status=notification.NotificationChannel.VerificationStatus.UNVERIFIED, + ) + + response = client.get_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == notification_service.GetNotificationChannelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, notification.NotificationChannel) + + assert response.type_ == "type__value" + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert ( + response.verification_status + == notification.NotificationChannel.VerificationStatus.UNVERIFIED + ) + + +def test_get_notification_channel_from_dict(): + test_get_notification_channel(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_notification_channel_async(transport: str = "grpc_asyncio"): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = notification_service.GetNotificationChannelRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification.NotificationChannel( + type_="type__value", + name="name_value", + display_name="display_name_value", + description="description_value", + verification_status=notification.NotificationChannel.VerificationStatus.UNVERIFIED, + ) + ) + + response = await client.get_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, notification.NotificationChannel) + + assert response.type_ == "type__value" + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert ( + response.verification_status + == notification.NotificationChannel.VerificationStatus.UNVERIFIED + ) + + +def test_get_notification_channel_field_headers(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.GetNotificationChannelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_notification_channel), "__call__" + ) as call: + call.return_value = notification.NotificationChannel() + + client.get_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_notification_channel_field_headers_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.GetNotificationChannelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.get_notification_channel), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification.NotificationChannel() + ) + + await client.get_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_notification_channel_flattened(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification.NotificationChannel() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_notification_channel(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_notification_channel_flattened_error(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_notification_channel( + notification_service.GetNotificationChannelRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_notification_channel_flattened_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification.NotificationChannel() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification.NotificationChannel() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_notification_channel(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_notification_channel_flattened_error_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_notification_channel( + notification_service.GetNotificationChannelRequest(), name="name_value", + ) + + +def test_create_notification_channel( + transport: str = "grpc", + request_type=notification_service.CreateNotificationChannelRequest, +): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification.NotificationChannel( + type_="type__value", + name="name_value", + display_name="display_name_value", + description="description_value", + verification_status=notification.NotificationChannel.VerificationStatus.UNVERIFIED, + ) + + response = client.create_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == notification_service.CreateNotificationChannelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, notification.NotificationChannel) + + assert response.type_ == "type__value" + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert ( + response.verification_status + == notification.NotificationChannel.VerificationStatus.UNVERIFIED + ) + + +def test_create_notification_channel_from_dict(): + test_create_notification_channel(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_notification_channel_async(transport: str = "grpc_asyncio"): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = notification_service.CreateNotificationChannelRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.create_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification.NotificationChannel( + type_="type__value", + name="name_value", + display_name="display_name_value", + description="description_value", + verification_status=notification.NotificationChannel.VerificationStatus.UNVERIFIED, + ) + ) + + response = await client.create_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, notification.NotificationChannel) + + assert response.type_ == "type__value" + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert ( + response.verification_status + == notification.NotificationChannel.VerificationStatus.UNVERIFIED + ) + + +def test_create_notification_channel_field_headers(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.CreateNotificationChannelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_notification_channel), "__call__" + ) as call: + call.return_value = notification.NotificationChannel() + + client.create_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_notification_channel_field_headers_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.CreateNotificationChannelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_notification_channel), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification.NotificationChannel() + ) + + await client.create_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_create_notification_channel_flattened(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification.NotificationChannel() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.create_notification_channel( + name="name_value", + notification_channel=notification.NotificationChannel(type_="type__value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].notification_channel == notification.NotificationChannel( + type_="type__value" + ) + + +def test_create_notification_channel_flattened_error(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_notification_channel( + notification_service.CreateNotificationChannelRequest(), + name="name_value", + notification_channel=notification.NotificationChannel(type_="type__value"), + ) + + +@pytest.mark.asyncio +async def test_create_notification_channel_flattened_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification.NotificationChannel() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification.NotificationChannel() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_notification_channel( + name="name_value", + notification_channel=notification.NotificationChannel(type_="type__value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].notification_channel == notification.NotificationChannel( + type_="type__value" + ) + + +@pytest.mark.asyncio +async def test_create_notification_channel_flattened_error_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_notification_channel( + notification_service.CreateNotificationChannelRequest(), + name="name_value", + notification_channel=notification.NotificationChannel(type_="type__value"), + ) + + +def test_update_notification_channel( + transport: str = "grpc", + request_type=notification_service.UpdateNotificationChannelRequest, +): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification.NotificationChannel( + type_="type__value", + name="name_value", + display_name="display_name_value", + description="description_value", + verification_status=notification.NotificationChannel.VerificationStatus.UNVERIFIED, + ) + + response = client.update_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == notification_service.UpdateNotificationChannelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, notification.NotificationChannel) + + assert response.type_ == "type__value" + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert ( + response.verification_status + == notification.NotificationChannel.VerificationStatus.UNVERIFIED + ) + + +def test_update_notification_channel_from_dict(): + test_update_notification_channel(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_notification_channel_async(transport: str = "grpc_asyncio"): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = notification_service.UpdateNotificationChannelRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification.NotificationChannel( + type_="type__value", + name="name_value", + display_name="display_name_value", + description="description_value", + verification_status=notification.NotificationChannel.VerificationStatus.UNVERIFIED, + ) + ) + + response = await client.update_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, notification.NotificationChannel) + + assert response.type_ == "type__value" + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert ( + response.verification_status + == notification.NotificationChannel.VerificationStatus.UNVERIFIED + ) + + +def test_update_notification_channel_field_headers(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.UpdateNotificationChannelRequest() + request.notification_channel.name = "notification_channel.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_notification_channel), "__call__" + ) as call: + call.return_value = notification.NotificationChannel() + + client.update_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "notification_channel.name=notification_channel.name/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_notification_channel_field_headers_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.UpdateNotificationChannelRequest() + request.notification_channel.name = "notification_channel.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.update_notification_channel), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification.NotificationChannel() + ) + + await client.update_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "notification_channel.name=notification_channel.name/value", + ) in kw["metadata"] + + +def test_update_notification_channel_flattened(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification.NotificationChannel() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_notification_channel( + update_mask=field_mask.FieldMask(paths=["paths_value"]), + notification_channel=notification.NotificationChannel(type_="type__value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + assert args[0].notification_channel == notification.NotificationChannel( + type_="type__value" + ) + + +def test_update_notification_channel_flattened_error(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_notification_channel( + notification_service.UpdateNotificationChannelRequest(), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + notification_channel=notification.NotificationChannel(type_="type__value"), + ) + + +@pytest.mark.asyncio +async def test_update_notification_channel_flattened_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification.NotificationChannel() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification.NotificationChannel() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_notification_channel( + update_mask=field_mask.FieldMask(paths=["paths_value"]), + notification_channel=notification.NotificationChannel(type_="type__value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + assert args[0].notification_channel == notification.NotificationChannel( + type_="type__value" + ) + + +@pytest.mark.asyncio +async def test_update_notification_channel_flattened_error_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_notification_channel( + notification_service.UpdateNotificationChannelRequest(), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + notification_channel=notification.NotificationChannel(type_="type__value"), + ) + + +def test_delete_notification_channel( + transport: str = "grpc", + request_type=notification_service.DeleteNotificationChannelRequest, +): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == notification_service.DeleteNotificationChannelRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_notification_channel_from_dict(): + test_delete_notification_channel(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_notification_channel_async(transport: str = "grpc_asyncio"): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = notification_service.DeleteNotificationChannelRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.delete_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_notification_channel_field_headers(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.DeleteNotificationChannelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_notification_channel), "__call__" + ) as call: + call.return_value = None + + client.delete_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_notification_channel_field_headers_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.DeleteNotificationChannelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.delete_notification_channel), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_notification_channel_flattened(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_notification_channel( + name="name_value", force=True, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].force == True + + +def test_delete_notification_channel_flattened_error(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_notification_channel( + notification_service.DeleteNotificationChannelRequest(), + name="name_value", + force=True, + ) + + +@pytest.mark.asyncio +async def test_delete_notification_channel_flattened_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_notification_channel( + name="name_value", force=True, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].force == True + + +@pytest.mark.asyncio +async def test_delete_notification_channel_flattened_error_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_notification_channel( + notification_service.DeleteNotificationChannelRequest(), + name="name_value", + force=True, + ) + + +def test_send_notification_channel_verification_code( + transport: str = "grpc", + request_type=notification_service.SendNotificationChannelVerificationCodeRequest, +): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.send_notification_channel_verification_code), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.send_notification_channel_verification_code(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert ( + args[0] + == notification_service.SendNotificationChannelVerificationCodeRequest() + ) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_send_notification_channel_verification_code_from_dict(): + test_send_notification_channel_verification_code(request_type=dict) + + +@pytest.mark.asyncio +async def test_send_notification_channel_verification_code_async( + transport: str = "grpc_asyncio", +): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = notification_service.SendNotificationChannelVerificationCodeRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.send_notification_channel_verification_code), + "__call__", + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.send_notification_channel_verification_code(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_send_notification_channel_verification_code_field_headers(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.SendNotificationChannelVerificationCodeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.send_notification_channel_verification_code), "__call__" + ) as call: + call.return_value = None + + client.send_notification_channel_verification_code(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_send_notification_channel_verification_code_field_headers_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.SendNotificationChannelVerificationCodeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.send_notification_channel_verification_code), + "__call__", + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.send_notification_channel_verification_code(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_send_notification_channel_verification_code_flattened(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.send_notification_channel_verification_code), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.send_notification_channel_verification_code(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_send_notification_channel_verification_code_flattened_error(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.send_notification_channel_verification_code( + notification_service.SendNotificationChannelVerificationCodeRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_send_notification_channel_verification_code_flattened_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.send_notification_channel_verification_code), + "__call__", + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.send_notification_channel_verification_code( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_send_notification_channel_verification_code_flattened_error_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.send_notification_channel_verification_code( + notification_service.SendNotificationChannelVerificationCodeRequest(), + name="name_value", + ) + + +def test_get_notification_channel_verification_code( + transport: str = "grpc", + request_type=notification_service.GetNotificationChannelVerificationCodeRequest, +): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_notification_channel_verification_code), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification_service.GetNotificationChannelVerificationCodeResponse( + code="code_value", + ) + + response = client.get_notification_channel_verification_code(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert ( + args[0] + == notification_service.GetNotificationChannelVerificationCodeRequest() + ) + + # Establish that the response is the type that we expect. 
+ assert isinstance( + response, notification_service.GetNotificationChannelVerificationCodeResponse + ) + + assert response.code == "code_value" + + +def test_get_notification_channel_verification_code_from_dict(): + test_get_notification_channel_verification_code(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_notification_channel_verification_code_async( + transport: str = "grpc_asyncio", +): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = notification_service.GetNotificationChannelVerificationCodeRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_notification_channel_verification_code), + "__call__", + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification_service.GetNotificationChannelVerificationCodeResponse( + code="code_value", + ) + ) + + response = await client.get_notification_channel_verification_code(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance( + response, notification_service.GetNotificationChannelVerificationCodeResponse + ) + + assert response.code == "code_value" + + +def test_get_notification_channel_verification_code_field_headers(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = notification_service.GetNotificationChannelVerificationCodeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_notification_channel_verification_code), "__call__" + ) as call: + call.return_value = ( + notification_service.GetNotificationChannelVerificationCodeResponse() + ) + + client.get_notification_channel_verification_code(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_notification_channel_verification_code_field_headers_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.GetNotificationChannelVerificationCodeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_notification_channel_verification_code), + "__call__", + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification_service.GetNotificationChannelVerificationCodeResponse() + ) + + await client.get_notification_channel_verification_code(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_notification_channel_verification_code_flattened(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_notification_channel_verification_code), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + notification_service.GetNotificationChannelVerificationCodeResponse() + ) + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_notification_channel_verification_code(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_notification_channel_verification_code_flattened_error(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_notification_channel_verification_code( + notification_service.GetNotificationChannelVerificationCodeRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_notification_channel_verification_code_flattened_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_notification_channel_verification_code), + "__call__", + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = ( + notification_service.GetNotificationChannelVerificationCodeResponse() + ) + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification_service.GetNotificationChannelVerificationCodeResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_notification_channel_verification_code( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_notification_channel_verification_code_flattened_error_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_notification_channel_verification_code( + notification_service.GetNotificationChannelVerificationCodeRequest(), + name="name_value", + ) + + +def test_verify_notification_channel( + transport: str = "grpc", + request_type=notification_service.VerifyNotificationChannelRequest, +): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.verify_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = notification.NotificationChannel( + type_="type__value", + name="name_value", + display_name="display_name_value", + description="description_value", + verification_status=notification.NotificationChannel.VerificationStatus.UNVERIFIED, + ) + + response = client.verify_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == notification_service.VerifyNotificationChannelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, notification.NotificationChannel) + + assert response.type_ == "type__value" + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert ( + response.verification_status + == notification.NotificationChannel.VerificationStatus.UNVERIFIED + ) + + +def test_verify_notification_channel_from_dict(): + test_verify_notification_channel(request_type=dict) + + +@pytest.mark.asyncio +async def test_verify_notification_channel_async(transport: str = "grpc_asyncio"): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = notification_service.VerifyNotificationChannelRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.verify_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification.NotificationChannel( + type_="type__value", + name="name_value", + display_name="display_name_value", + description="description_value", + verification_status=notification.NotificationChannel.VerificationStatus.UNVERIFIED, + ) + ) + + response = await client.verify_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, notification.NotificationChannel) + + assert response.type_ == "type__value" + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert ( + response.verification_status + == notification.NotificationChannel.VerificationStatus.UNVERIFIED + ) + + +def test_verify_notification_channel_field_headers(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.VerifyNotificationChannelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.verify_notification_channel), "__call__" + ) as call: + call.return_value = notification.NotificationChannel() + + client.verify_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_verify_notification_channel_field_headers_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notification_service.VerifyNotificationChannelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.verify_notification_channel), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification.NotificationChannel() + ) + + await client.verify_notification_channel(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_verify_notification_channel_flattened(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.verify_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification.NotificationChannel() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.verify_notification_channel( + name="name_value", code="code_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].code == "code_value" + + +def test_verify_notification_channel_flattened_error(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.verify_notification_channel( + notification_service.VerifyNotificationChannelRequest(), + name="name_value", + code="code_value", + ) + + +@pytest.mark.asyncio +async def test_verify_notification_channel_flattened_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.verify_notification_channel), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notification.NotificationChannel() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notification.NotificationChannel() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.verify_notification_channel( + name="name_value", code="code_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].code == "code_value" + + +@pytest.mark.asyncio +async def test_verify_notification_channel_flattened_error_async(): + client = NotificationChannelServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.verify_notification_channel( + notification_service.VerifyNotificationChannelRequest(), + name="name_value", + code="code_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.NotificationChannelServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.NotificationChannelServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NotificationChannelServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.NotificationChannelServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NotificationChannelServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.NotificationChannelServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = NotificationChannelServiceClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.NotificationChannelServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.NotificationChannelServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.NotificationChannelServiceGrpcTransport, + transports.NotificationChannelServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + assert isinstance( + client._transport, transports.NotificationChannelServiceGrpcTransport, + ) + + +def test_notification_channel_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.NotificationChannelServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_notification_channel_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.monitoring_v3.services.notification_channel_service.transports.NotificationChannelServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.NotificationChannelServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "list_notification_channel_descriptors", + "get_notification_channel_descriptor", + "list_notification_channels", + "get_notification_channel", + "create_notification_channel", + "update_notification_channel", + "delete_notification_channel", + "send_notification_channel_verification_code", + "get_notification_channel_verification_code", + "verify_notification_channel", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_notification_channel_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.monitoring_v3.services.notification_channel_service.transports.NotificationChannelServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.NotificationChannelServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + quota_project_id="octopus", + ) + + +def test_notification_channel_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.monitoring_v3.services.notification_channel_service.transports.NotificationChannelServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.NotificationChannelServiceTransport() + adc.assert_called_once() + + +def test_notification_channel_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + NotificationChannelServiceClient() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + quota_project_id=None, + ) + + +def test_notification_channel_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.NotificationChannelServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + quota_project_id="octopus", + ) + + +def test_notification_channel_service_host_no_port(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="monitoring.googleapis.com" + ), + ) + assert client._transport._host == "monitoring.googleapis.com:443" + + +def test_notification_channel_service_host_with_port(): + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="monitoring.googleapis.com:8000" + ), + ) + assert client._transport._host == "monitoring.googleapis.com:8000" + + +def test_notification_channel_service_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.NotificationChannelServiceGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +def test_notification_channel_service_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that channel is used if provided. 
+ transport = transports.NotificationChannelServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.NotificationChannelServiceGrpcTransport, + transports.NotificationChannelServiceGrpcAsyncIOTransport, + ], +) +def test_notification_channel_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.NotificationChannelServiceGrpcTransport, + transports.NotificationChannelServiceGrpcAsyncIOTransport, + ], +) +def 
test_notification_channel_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_notification_channel_path(): + project = "squid" + notification_channel = "clam" + + expected = "projects/{project}/notificationChannels/{notification_channel}".format( + project=project, notification_channel=notification_channel, + ) + actual = NotificationChannelServiceClient.notification_channel_path( + project, notification_channel + ) + assert expected == actual + + +def test_parse_notification_channel_path(): + expected = { + "project": "whelk", + "notification_channel": "octopus", + } + path = NotificationChannelServiceClient.notification_channel_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NotificationChannelServiceClient.parse_notification_channel_path(path) + assert expected == actual + + +def test_notification_channel_descriptor_path(): + project = "oyster" + channel_descriptor = "nudibranch" + + expected = "projects/{project}/notificationChannelDescriptors/{channel_descriptor}".format( + project=project, channel_descriptor=channel_descriptor, + ) + actual = NotificationChannelServiceClient.notification_channel_descriptor_path( + project, channel_descriptor + ) + assert expected == actual + + +def test_parse_notification_channel_descriptor_path(): + expected = { + "project": "cuttlefish", + "channel_descriptor": "mussel", + } + path = NotificationChannelServiceClient.notification_channel_descriptor_path( + **expected + ) + + # Check that the path construction is reversible. + actual = NotificationChannelServiceClient.parse_notification_channel_descriptor_path( + path + ) + assert expected == actual + + +def test_common_project_path(): + project = "winkle" + + expected = "projects/{project}".format(project=project,) + actual = NotificationChannelServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nautilus", + } + path = NotificationChannelServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = NotificationChannelServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "scallop" + + expected = "organizations/{organization}".format(organization=organization,) + actual = NotificationChannelServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = NotificationChannelServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NotificationChannelServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "squid" + + expected = "folders/{folder}".format(folder=folder,) + actual = NotificationChannelServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "clam", + } + path = NotificationChannelServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = NotificationChannelServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "whelk" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = NotificationChannelServiceClient.common_billing_account_path( + billing_account + ) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "octopus", + } + path = NotificationChannelServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = NotificationChannelServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = NotificationChannelServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = NotificationChannelServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NotificationChannelServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.NotificationChannelServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = NotificationChannelServiceClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.NotificationChannelServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = NotificationChannelServiceClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/monitoring_v3/test_service_monitoring_service.py b/tests/unit/gapic/monitoring_v3/test_service_monitoring_service.py new file mode 100644 index 00000000..17fc2b1a --- /dev/null +++ b/tests/unit/gapic/monitoring_v3/test_service_monitoring_service.py @@ -0,0 +1,3422 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.monitoring_v3.services.service_monitoring_service import ( + ServiceMonitoringServiceAsyncClient, +) +from google.cloud.monitoring_v3.services.service_monitoring_service import ( + ServiceMonitoringServiceClient, +) +from google.cloud.monitoring_v3.services.service_monitoring_service import pagers +from google.cloud.monitoring_v3.services.service_monitoring_service import transports +from google.cloud.monitoring_v3.types import service +from google.cloud.monitoring_v3.types import service as gm_service +from google.cloud.monitoring_v3.types import service_service +from google.oauth2 import service_account +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.type import calendar_period_pb2 as calendar_period # type: ignore +from google.type import calendar_period_pb2 as gt_calendar_period # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ServiceMonitoringServiceClient._get_default_mtls_endpoint(None) is None + assert ( + ServiceMonitoringServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + ServiceMonitoringServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + ServiceMonitoringServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ServiceMonitoringServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ServiceMonitoringServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", + [ServiceMonitoringServiceClient, ServiceMonitoringServiceAsyncClient], +) +def test_service_monitoring_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "monitoring.googleapis.com:443" + + +def test_service_monitoring_service_client_get_transport_class(): + transport = ServiceMonitoringServiceClient.get_transport_class() + assert transport == 
transports.ServiceMonitoringServiceGrpcTransport + + transport = ServiceMonitoringServiceClient.get_transport_class("grpc") + assert transport == transports.ServiceMonitoringServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + ServiceMonitoringServiceClient, + transports.ServiceMonitoringServiceGrpcTransport, + "grpc", + ), + ( + ServiceMonitoringServiceAsyncClient, + transports.ServiceMonitoringServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + ServiceMonitoringServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ServiceMonitoringServiceClient), +) +@mock.patch.object( + ServiceMonitoringServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ServiceMonitoringServiceAsyncClient), +) +def test_service_monitoring_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object( + ServiceMonitoringServiceClient, "get_transport_class" + ) as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object( + ServiceMonitoringServiceClient, "get_transport_class" + ) as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + ServiceMonitoringServiceClient, + transports.ServiceMonitoringServiceGrpcTransport, + "grpc", + "true", + ), + ( + ServiceMonitoringServiceAsyncClient, + transports.ServiceMonitoringServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + ServiceMonitoringServiceClient, + transports.ServiceMonitoringServiceGrpcTransport, + "grpc", + "false", + ), + ( + ServiceMonitoringServiceAsyncClient, + transports.ServiceMonitoringServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + ServiceMonitoringServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ServiceMonitoringServiceClient), +) +@mock.patch.object( + ServiceMonitoringServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ServiceMonitoringServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_service_monitoring_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. 
+ + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + ServiceMonitoringServiceClient, + transports.ServiceMonitoringServiceGrpcTransport, + "grpc", + ), + ( + ServiceMonitoringServiceAsyncClient, + transports.ServiceMonitoringServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_service_monitoring_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + ServiceMonitoringServiceClient, + transports.ServiceMonitoringServiceGrpcTransport, + "grpc", + ), + ( + ServiceMonitoringServiceAsyncClient, + transports.ServiceMonitoringServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_service_monitoring_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_service_monitoring_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.monitoring_v3.services.service_monitoring_service.transports.ServiceMonitoringServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = ServiceMonitoringServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + 
client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_service( + transport: str = "grpc", request_type=service_service.CreateServiceRequest +): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_service), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gm_service.Service( + name="name_value", display_name="display_name_value", custom=None, + ) + + response = client.create_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service_service.CreateServiceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gm_service.Service) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + +def test_create_service_from_dict(): + test_create_service(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_service_async(transport: str = "grpc_asyncio"): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service_service.CreateServiceRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gm_service.Service(name="name_value", display_name="display_name_value",) + ) + + response = await client.create_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, gm_service.Service) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + +def test_create_service_field_headers(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.CreateServiceRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_service), "__call__") as call: + call.return_value = gm_service.Service() + + client.create_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_service_field_headers_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.CreateServiceRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.create_service), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gm_service.Service()) + + await client.create_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_service_flattened(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_service), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gm_service.Service() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_service( + parent="parent_value", service=gm_service.Service(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].service == gm_service.Service(name="name_value") + + +def test_create_service_flattened_error(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_service( + service_service.CreateServiceRequest(), + parent="parent_value", + service=gm_service.Service(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_service_flattened_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gm_service.Service() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gm_service.Service()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_service( + parent="parent_value", service=gm_service.Service(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].service == gm_service.Service(name="name_value") + + +@pytest.mark.asyncio +async def test_create_service_flattened_error_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_service( + service_service.CreateServiceRequest(), + parent="parent_value", + service=gm_service.Service(name="name_value"), + ) + + +def test_get_service( + transport: str = "grpc", request_type=service_service.GetServiceRequest +): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_service), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = service.Service( + name="name_value", display_name="display_name_value", custom=None, + ) + + response = client.get_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service_service.GetServiceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, service.Service) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + +def test_get_service_from_dict(): + test_get_service(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_service_async(transport: str = "grpc_asyncio"): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service_service.GetServiceRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.get_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.Service(name="name_value", display_name="display_name_value",) + ) + + response = await client.get_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, service.Service) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + +def test_get_service_field_headers(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.GetServiceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_service), "__call__") as call: + call.return_value = service.Service() + + client.get_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_service_field_headers_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = service_service.GetServiceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_service), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.Service()) + + await client.get_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_service_flattened(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_service), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = service.Service() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_service(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_service_flattened_error(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_service( + service_service.GetServiceRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_service_flattened_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.Service() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.Service()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_service(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_service_flattened_error_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_service( + service_service.GetServiceRequest(), name="name_value", + ) + + +def test_list_services( + transport: str = "grpc", request_type=service_service.ListServicesRequest +): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.list_services), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = service_service.ListServicesResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_services(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service_service.ListServicesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListServicesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_services_from_dict(): + test_list_services(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_services_async(transport: str = "grpc_asyncio"): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service_service.ListServicesRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_services), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service_service.ListServicesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_services(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListServicesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_services_field_headers(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.ListServicesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_services), "__call__") as call: + call.return_value = service_service.ListServicesResponse() + + client.list_services(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_services_field_headers_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.ListServicesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_services), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service_service.ListServicesResponse() + ) + + await client.list_services(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_services_flattened(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_services), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = service_service.ListServicesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_services(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_services_flattened_error(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_services( + service_service.ListServicesRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_services_flattened_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_services), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service_service.ListServicesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service_service.ListServicesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.list_services(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_services_flattened_error_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_services( + service_service.ListServicesRequest(), parent="parent_value", + ) + + +def test_list_services_pager(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_services), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + service_service.ListServicesResponse( + services=[service.Service(), service.Service(), service.Service(),], + next_page_token="abc", + ), + service_service.ListServicesResponse(services=[], next_page_token="def",), + service_service.ListServicesResponse( + services=[service.Service(),], next_page_token="ghi", + ), + service_service.ListServicesResponse( + services=[service.Service(), service.Service(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_services(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, service.Service) for i in results) + + +def test_list_services_pages(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.list_services), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + service_service.ListServicesResponse( + services=[service.Service(), service.Service(), service.Service(),], + next_page_token="abc", + ), + service_service.ListServicesResponse(services=[], next_page_token="def",), + service_service.ListServicesResponse( + services=[service.Service(),], next_page_token="ghi", + ), + service_service.ListServicesResponse( + services=[service.Service(), service.Service(),], + ), + RuntimeError, + ) + pages = list(client.list_services(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_services_async_pager(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_services), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + service_service.ListServicesResponse( + services=[service.Service(), service.Service(), service.Service(),], + next_page_token="abc", + ), + service_service.ListServicesResponse(services=[], next_page_token="def",), + service_service.ListServicesResponse( + services=[service.Service(),], next_page_token="ghi", + ), + service_service.ListServicesResponse( + services=[service.Service(), service.Service(),], + ), + RuntimeError, + ) + async_pager = await client.list_services(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, service.Service) for i in responses) + + +@pytest.mark.asyncio +async def test_list_services_async_pages(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_services), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + service_service.ListServicesResponse( + services=[service.Service(), service.Service(), service.Service(),], + next_page_token="abc", + ), + service_service.ListServicesResponse(services=[], next_page_token="def",), + service_service.ListServicesResponse( + services=[service.Service(),], next_page_token="ghi", + ), + service_service.ListServicesResponse( + services=[service.Service(), service.Service(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_services(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_service( + transport: str = "grpc", request_type=service_service.UpdateServiceRequest +): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_service), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gm_service.Service( + name="name_value", display_name="display_name_value", custom=None, + ) + + response = client.update_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service_service.UpdateServiceRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gm_service.Service) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + +def test_update_service_from_dict(): + test_update_service(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_service_async(transport: str = "grpc_asyncio"): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service_service.UpdateServiceRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gm_service.Service(name="name_value", display_name="display_name_value",) + ) + + response = await client.update_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, gm_service.Service) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + +def test_update_service_field_headers(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.UpdateServiceRequest() + request.service.name = "service.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.update_service), "__call__") as call: + call.return_value = gm_service.Service() + + client.update_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "service.name=service.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_service_field_headers_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.UpdateServiceRequest() + request.service.name = "service.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_service), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gm_service.Service()) + + await client.update_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "service.name=service.name/value",) in kw[ + "metadata" + ] + + +def test_update_service_flattened(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_service), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gm_service.Service() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_service(service=gm_service.Service(name="name_value"),) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].service == gm_service.Service(name="name_value") + + +def test_update_service_flattened_error(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_service( + service_service.UpdateServiceRequest(), + service=gm_service.Service(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_update_service_flattened_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gm_service.Service() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gm_service.Service()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_service( + service=gm_service.Service(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].service == gm_service.Service(name="name_value") + + +@pytest.mark.asyncio +async def test_update_service_flattened_error_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_service( + service_service.UpdateServiceRequest(), + service=gm_service.Service(name="name_value"), + ) + + +def test_delete_service( + transport: str = "grpc", request_type=service_service.DeleteServiceRequest +): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_service), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service_service.DeleteServiceRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_service_from_dict(): + test_delete_service(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_service_async(transport: str = "grpc_asyncio"): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service_service.DeleteServiceRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_service_field_headers(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.DeleteServiceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_service), "__call__") as call: + call.return_value = None + + client.delete_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_service_field_headers_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.DeleteServiceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_service), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_service_flattened(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_service), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_service(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_service_flattened_error(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_service( + service_service.DeleteServiceRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_service_flattened_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_service), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_service(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_service_flattened_error_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_service( + service_service.DeleteServiceRequest(), name="name_value", + ) + + +def test_create_service_level_objective( + transport: str = "grpc", + request_type=service_service.CreateServiceLevelObjectiveRequest, +): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_service_level_objective), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ServiceLevelObjective( + name="name_value", + display_name="display_name_value", + goal=0.419, + rolling_period=duration.Duration(seconds=751), + ) + + response = client.create_service_level_objective(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service_service.CreateServiceLevelObjectiveRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, service.ServiceLevelObjective) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert math.isclose(response.goal, 0.419, rel_tol=1e-6) + + +def test_create_service_level_objective_from_dict(): + test_create_service_level_objective(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_service_level_objective_async(transport: str = "grpc_asyncio"): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service_service.CreateServiceLevelObjectiveRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_service_level_objective), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ServiceLevelObjective( + name="name_value", display_name="display_name_value", goal=0.419, + ) + ) + + response = await client.create_service_level_objective(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, service.ServiceLevelObjective) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert math.isclose(response.goal, 0.419, rel_tol=1e-6) + + +def test_create_service_level_objective_field_headers(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = service_service.CreateServiceLevelObjectiveRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_service_level_objective), "__call__" + ) as call: + call.return_value = service.ServiceLevelObjective() + + client.create_service_level_objective(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_service_level_objective_field_headers_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.CreateServiceLevelObjectiveRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_service_level_objective), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ServiceLevelObjective() + ) + + await client.create_service_level_objective(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_service_level_objective_flattened(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_service_level_objective), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ServiceLevelObjective() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_service_level_objective( + parent="parent_value", + service_level_objective=service.ServiceLevelObjective(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].service_level_objective == service.ServiceLevelObjective( + name="name_value" + ) + + +def test_create_service_level_objective_flattened_error(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_service_level_objective( + service_service.CreateServiceLevelObjectiveRequest(), + parent="parent_value", + service_level_objective=service.ServiceLevelObjective(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_service_level_objective_flattened_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.create_service_level_objective), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ServiceLevelObjective() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ServiceLevelObjective() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_service_level_objective( + parent="parent_value", + service_level_objective=service.ServiceLevelObjective(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].service_level_objective == service.ServiceLevelObjective( + name="name_value" + ) + + +@pytest.mark.asyncio +async def test_create_service_level_objective_flattened_error_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_service_level_objective( + service_service.CreateServiceLevelObjectiveRequest(), + parent="parent_value", + service_level_objective=service.ServiceLevelObjective(name="name_value"), + ) + + +def test_get_service_level_objective( + transport: str = "grpc", + request_type=service_service.GetServiceLevelObjectiveRequest, +): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.get_service_level_objective), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ServiceLevelObjective( + name="name_value", + display_name="display_name_value", + goal=0.419, + rolling_period=duration.Duration(seconds=751), + ) + + response = client.get_service_level_objective(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service_service.GetServiceLevelObjectiveRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, service.ServiceLevelObjective) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert math.isclose(response.goal, 0.419, rel_tol=1e-6) + + +def test_get_service_level_objective_from_dict(): + test_get_service_level_objective(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_service_level_objective_async(transport: str = "grpc_asyncio"): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service_service.GetServiceLevelObjectiveRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_service_level_objective), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ServiceLevelObjective( + name="name_value", display_name="display_name_value", goal=0.419, + ) + ) + + response = await client.get_service_level_objective(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, service.ServiceLevelObjective) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert math.isclose(response.goal, 0.419, rel_tol=1e-6) + + +def test_get_service_level_objective_field_headers(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.GetServiceLevelObjectiveRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_service_level_objective), "__call__" + ) as call: + call.return_value = service.ServiceLevelObjective() + + client.get_service_level_objective(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_service_level_objective_field_headers_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.GetServiceLevelObjectiveRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.get_service_level_objective), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ServiceLevelObjective() + ) + + await client.get_service_level_objective(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_service_level_objective_flattened(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_service_level_objective), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ServiceLevelObjective() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_service_level_objective(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_service_level_objective_flattened_error(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_service_level_objective( + service_service.GetServiceLevelObjectiveRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_service_level_objective_flattened_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_service_level_objective), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ServiceLevelObjective() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ServiceLevelObjective() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_service_level_objective(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_service_level_objective_flattened_error_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_service_level_objective( + service_service.GetServiceLevelObjectiveRequest(), name="name_value", + ) + + +def test_list_service_level_objectives( + transport: str = "grpc", + request_type=service_service.ListServiceLevelObjectivesRequest, +): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_service_level_objectives), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service_service.ListServiceLevelObjectivesResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_service_level_objectives(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service_service.ListServiceLevelObjectivesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListServiceLevelObjectivesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_service_level_objectives_from_dict(): + test_list_service_level_objectives(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_service_level_objectives_async(transport: str = "grpc_asyncio"): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service_service.ListServiceLevelObjectivesRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_service_level_objectives), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service_service.ListServiceLevelObjectivesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_service_level_objectives(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListServiceLevelObjectivesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_service_level_objectives_field_headers(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.ListServiceLevelObjectivesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_service_level_objectives), "__call__" + ) as call: + call.return_value = service_service.ListServiceLevelObjectivesResponse() + + client.list_service_level_objectives(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_service_level_objectives_field_headers_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.ListServiceLevelObjectivesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.list_service_level_objectives), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service_service.ListServiceLevelObjectivesResponse() + ) + + await client.list_service_level_objectives(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_service_level_objectives_flattened(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_service_level_objectives), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service_service.ListServiceLevelObjectivesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_service_level_objectives(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_service_level_objectives_flattened_error(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_service_level_objectives( + service_service.ListServiceLevelObjectivesRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_service_level_objectives_flattened_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_service_level_objectives), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service_service.ListServiceLevelObjectivesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service_service.ListServiceLevelObjectivesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_service_level_objectives(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_service_level_objectives_flattened_error_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_service_level_objectives( + service_service.ListServiceLevelObjectivesRequest(), parent="parent_value", + ) + + +def test_list_service_level_objectives_pager(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.list_service_level_objectives), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service_service.ListServiceLevelObjectivesResponse( + service_level_objectives=[ + service.ServiceLevelObjective(), + service.ServiceLevelObjective(), + service.ServiceLevelObjective(), + ], + next_page_token="abc", + ), + service_service.ListServiceLevelObjectivesResponse( + service_level_objectives=[], next_page_token="def", + ), + service_service.ListServiceLevelObjectivesResponse( + service_level_objectives=[service.ServiceLevelObjective(),], + next_page_token="ghi", + ), + service_service.ListServiceLevelObjectivesResponse( + service_level_objectives=[ + service.ServiceLevelObjective(), + service.ServiceLevelObjective(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_service_level_objectives(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, service.ServiceLevelObjective) for i in results) + + +def test_list_service_level_objectives_pages(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_service_level_objectives), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + service_service.ListServiceLevelObjectivesResponse( + service_level_objectives=[ + service.ServiceLevelObjective(), + service.ServiceLevelObjective(), + service.ServiceLevelObjective(), + ], + next_page_token="abc", + ), + service_service.ListServiceLevelObjectivesResponse( + service_level_objectives=[], next_page_token="def", + ), + service_service.ListServiceLevelObjectivesResponse( + service_level_objectives=[service.ServiceLevelObjective(),], + next_page_token="ghi", + ), + service_service.ListServiceLevelObjectivesResponse( + service_level_objectives=[ + service.ServiceLevelObjective(), + service.ServiceLevelObjective(), + ], + ), + RuntimeError, + ) + pages = list(client.list_service_level_objectives(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_service_level_objectives_async_pager(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_service_level_objectives), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + service_service.ListServiceLevelObjectivesResponse( + service_level_objectives=[ + service.ServiceLevelObjective(), + service.ServiceLevelObjective(), + service.ServiceLevelObjective(), + ], + next_page_token="abc", + ), + service_service.ListServiceLevelObjectivesResponse( + service_level_objectives=[], next_page_token="def", + ), + service_service.ListServiceLevelObjectivesResponse( + service_level_objectives=[service.ServiceLevelObjective(),], + next_page_token="ghi", + ), + service_service.ListServiceLevelObjectivesResponse( + service_level_objectives=[ + service.ServiceLevelObjective(), + service.ServiceLevelObjective(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_service_level_objectives(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, service.ServiceLevelObjective) for i in responses) + + +@pytest.mark.asyncio +async def test_list_service_level_objectives_async_pages(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_service_level_objectives), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + service_service.ListServiceLevelObjectivesResponse( + service_level_objectives=[ + service.ServiceLevelObjective(), + service.ServiceLevelObjective(), + service.ServiceLevelObjective(), + ], + next_page_token="abc", + ), + service_service.ListServiceLevelObjectivesResponse( + service_level_objectives=[], next_page_token="def", + ), + service_service.ListServiceLevelObjectivesResponse( + service_level_objectives=[service.ServiceLevelObjective(),], + next_page_token="ghi", + ), + service_service.ListServiceLevelObjectivesResponse( + service_level_objectives=[ + service.ServiceLevelObjective(), + service.ServiceLevelObjective(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in ( + await client.list_service_level_objectives(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_service_level_objective( + transport: str = "grpc", + request_type=service_service.UpdateServiceLevelObjectiveRequest, +): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_service_level_objective), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ServiceLevelObjective( + name="name_value", + display_name="display_name_value", + goal=0.419, + rolling_period=duration.Duration(seconds=751), + ) + + response = client.update_service_level_objective(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service_service.UpdateServiceLevelObjectiveRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, service.ServiceLevelObjective) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert math.isclose(response.goal, 0.419, rel_tol=1e-6) + + +def test_update_service_level_objective_from_dict(): + test_update_service_level_objective(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_service_level_objective_async(transport: str = "grpc_asyncio"): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service_service.UpdateServiceLevelObjectiveRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_service_level_objective), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ServiceLevelObjective( + name="name_value", display_name="display_name_value", goal=0.419, + ) + ) + + response = await client.update_service_level_objective(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, service.ServiceLevelObjective) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert math.isclose(response.goal, 0.419, rel_tol=1e-6) + + +def test_update_service_level_objective_field_headers(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.UpdateServiceLevelObjectiveRequest() + request.service_level_objective.name = "service_level_objective.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_service_level_objective), "__call__" + ) as call: + call.return_value = service.ServiceLevelObjective() + + client.update_service_level_objective(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "service_level_objective.name=service_level_objective.name/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_service_level_objective_field_headers_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.UpdateServiceLevelObjectiveRequest() + request.service_level_objective.name = "service_level_objective.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.update_service_level_objective), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ServiceLevelObjective() + ) + + await client.update_service_level_objective(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "service_level_objective.name=service_level_objective.name/value", + ) in kw["metadata"] + + +def test_update_service_level_objective_flattened(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_service_level_objective), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ServiceLevelObjective() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_service_level_objective( + service_level_objective=service.ServiceLevelObjective(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].service_level_objective == service.ServiceLevelObjective( + name="name_value" + ) + + +def test_update_service_level_objective_flattened_error(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_service_level_objective( + service_service.UpdateServiceLevelObjectiveRequest(), + service_level_objective=service.ServiceLevelObjective(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_update_service_level_objective_flattened_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_service_level_objective), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ServiceLevelObjective() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ServiceLevelObjective() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_service_level_objective( + service_level_objective=service.ServiceLevelObjective(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].service_level_objective == service.ServiceLevelObjective( + name="name_value" + ) + + +@pytest.mark.asyncio +async def test_update_service_level_objective_flattened_error_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_service_level_objective( + service_service.UpdateServiceLevelObjectiveRequest(), + service_level_objective=service.ServiceLevelObjective(name="name_value"), + ) + + +def test_delete_service_level_objective( + transport: str = "grpc", + request_type=service_service.DeleteServiceLevelObjectiveRequest, +): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_service_level_objective), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_service_level_objective(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service_service.DeleteServiceLevelObjectiveRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_service_level_objective_from_dict(): + test_delete_service_level_objective(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_service_level_objective_async(transport: str = "grpc_asyncio"): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service_service.DeleteServiceLevelObjectiveRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.delete_service_level_objective), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_service_level_objective(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_service_level_objective_field_headers(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.DeleteServiceLevelObjectiveRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_service_level_objective), "__call__" + ) as call: + call.return_value = None + + client.delete_service_level_objective(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_service_level_objective_field_headers_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service_service.DeleteServiceLevelObjectiveRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.delete_service_level_objective), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_service_level_objective(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_service_level_objective_flattened(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_service_level_objective), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_service_level_objective(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_service_level_objective_flattened_error(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_service_level_objective( + service_service.DeleteServiceLevelObjectiveRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_service_level_objective_flattened_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_service_level_objective), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_service_level_objective(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_service_level_objective_flattened_error_async(): + client = ServiceMonitoringServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_service_level_objective( + service_service.DeleteServiceLevelObjectiveRequest(), name="name_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ServiceMonitoringServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.ServiceMonitoringServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ServiceMonitoringServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ServiceMonitoringServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ServiceMonitoringServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ServiceMonitoringServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = ServiceMonitoringServiceClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ServiceMonitoringServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ServiceMonitoringServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ServiceMonitoringServiceGrpcTransport, + transports.ServiceMonitoringServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + assert isinstance( + client._transport, transports.ServiceMonitoringServiceGrpcTransport, + ) + + +def test_service_monitoring_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.ServiceMonitoringServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_service_monitoring_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.monitoring_v3.services.service_monitoring_service.transports.ServiceMonitoringServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.ServiceMonitoringServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "create_service", + "get_service", + "list_services", + "update_service", + "delete_service", + "create_service_level_objective", + "get_service_level_objective", + "list_service_level_objectives", + "update_service_level_objective", + "delete_service_level_objective", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_service_monitoring_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.monitoring_v3.services.service_monitoring_service.transports.ServiceMonitoringServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.ServiceMonitoringServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + quota_project_id="octopus", + ) + + +def test_service_monitoring_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.monitoring_v3.services.service_monitoring_service.transports.ServiceMonitoringServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.ServiceMonitoringServiceTransport() + adc.assert_called_once() + + +def test_service_monitoring_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + ServiceMonitoringServiceClient() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + quota_project_id=None, + ) + + +def test_service_monitoring_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.ServiceMonitoringServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + quota_project_id="octopus", + ) + + +def test_service_monitoring_service_host_no_port(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="monitoring.googleapis.com" + ), + ) + assert client._transport._host == "monitoring.googleapis.com:443" + + +def test_service_monitoring_service_host_with_port(): + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="monitoring.googleapis.com:8000" + ), + ) + assert client._transport._host == "monitoring.googleapis.com:8000" + + +def test_service_monitoring_service_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that channel is used if provided. 
+ transport = transports.ServiceMonitoringServiceGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +def test_service_monitoring_service_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.ServiceMonitoringServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ServiceMonitoringServiceGrpcTransport, + transports.ServiceMonitoringServiceGrpcAsyncIOTransport, + ], +) +def test_service_monitoring_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + 
"https://www.googleapis.com/auth/monitoring.read", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ServiceMonitoringServiceGrpcTransport, + transports.ServiceMonitoringServiceGrpcAsyncIOTransport, + ], +) +def test_service_monitoring_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_service_path(): + project = "squid" + service = "clam" + + expected = "projects/{project}/services/{service}".format( + project=project, service=service, + ) + actual = ServiceMonitoringServiceClient.service_path(project, service) + assert expected == actual + + +def test_parse_service_path(): + expected = { + "project": "whelk", + "service": "octopus", + } + path = ServiceMonitoringServiceClient.service_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ServiceMonitoringServiceClient.parse_service_path(path) + assert expected == actual + + +def test_service_level_objective_path(): + project = "oyster" + service = "nudibranch" + service_level_objective = "cuttlefish" + + expected = "projects/{project}/services/{service}/serviceLevelObjectives/{service_level_objective}".format( + project=project, + service=service, + service_level_objective=service_level_objective, + ) + actual = ServiceMonitoringServiceClient.service_level_objective_path( + project, service, service_level_objective + ) + assert expected == actual + + +def test_parse_service_level_objective_path(): + expected = { + "project": "mussel", + "service": "winkle", + "service_level_objective": "nautilus", + } + path = ServiceMonitoringServiceClient.service_level_objective_path(**expected) + + # Check that the path construction is reversible. + actual = ServiceMonitoringServiceClient.parse_service_level_objective_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "scallop" + + expected = "projects/{project}".format(project=project,) + actual = ServiceMonitoringServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = ServiceMonitoringServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ServiceMonitoringServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "squid" + + expected = "organizations/{organization}".format(organization=organization,) + actual = ServiceMonitoringServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = ServiceMonitoringServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ServiceMonitoringServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "whelk" + + expected = "folders/{folder}".format(folder=folder,) + actual = ServiceMonitoringServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ServiceMonitoringServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ServiceMonitoringServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "oyster" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = ServiceMonitoringServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = ServiceMonitoringServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ServiceMonitoringServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "cuttlefish" + location = "mussel" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = ServiceMonitoringServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "winkle", + "location": "nautilus", + } + path = ServiceMonitoringServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ServiceMonitoringServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.ServiceMonitoringServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = ServiceMonitoringServiceClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.ServiceMonitoringServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = ServiceMonitoringServiceClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/monitoring_v3/test_uptime_check_service.py b/tests/unit/gapic/monitoring_v3/test_uptime_check_service.py new file mode 100644 index 00000000..94608b5d --- /dev/null +++ b/tests/unit/gapic/monitoring_v3/test_uptime_check_service.py @@ -0,0 +1,2434 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api import monitored_resource_pb2 as ga_monitored_resource # type: ignore +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.monitoring_v3.services.uptime_check_service import ( + UptimeCheckServiceAsyncClient, +) +from google.cloud.monitoring_v3.services.uptime_check_service import ( + UptimeCheckServiceClient, +) +from google.cloud.monitoring_v3.services.uptime_check_service import pagers +from google.cloud.monitoring_v3.services.uptime_check_service import transports +from google.cloud.monitoring_v3.types import uptime +from google.cloud.monitoring_v3.types import uptime_service +from google.oauth2 import service_account +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert UptimeCheckServiceClient._get_default_mtls_endpoint(None) is None + assert ( + UptimeCheckServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + UptimeCheckServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + UptimeCheckServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + UptimeCheckServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + UptimeCheckServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [UptimeCheckServiceClient, UptimeCheckServiceAsyncClient] +) +def test_uptime_check_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "monitoring.googleapis.com:443" + + +def test_uptime_check_service_client_get_transport_class(): + transport = UptimeCheckServiceClient.get_transport_class() + assert transport == transports.UptimeCheckServiceGrpcTransport + + transport = 
UptimeCheckServiceClient.get_transport_class("grpc") + assert transport == transports.UptimeCheckServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (UptimeCheckServiceClient, transports.UptimeCheckServiceGrpcTransport, "grpc"), + ( + UptimeCheckServiceAsyncClient, + transports.UptimeCheckServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + UptimeCheckServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(UptimeCheckServiceClient), +) +@mock.patch.object( + UptimeCheckServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(UptimeCheckServiceAsyncClient), +) +def test_uptime_check_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(UptimeCheckServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(UptimeCheckServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + UptimeCheckServiceClient, + transports.UptimeCheckServiceGrpcTransport, + "grpc", + "true", + ), + ( + UptimeCheckServiceAsyncClient, + transports.UptimeCheckServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + UptimeCheckServiceClient, + transports.UptimeCheckServiceGrpcTransport, + "grpc", + "false", + ), + ( + UptimeCheckServiceAsyncClient, + transports.UptimeCheckServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + UptimeCheckServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(UptimeCheckServiceClient), +) +@mock.patch.object( + UptimeCheckServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(UptimeCheckServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_uptime_check_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. 
Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (UptimeCheckServiceClient, transports.UptimeCheckServiceGrpcTransport, "grpc"), + ( + UptimeCheckServiceAsyncClient, + transports.UptimeCheckServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_uptime_check_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (UptimeCheckServiceClient, transports.UptimeCheckServiceGrpcTransport, "grpc"), + ( + UptimeCheckServiceAsyncClient, + transports.UptimeCheckServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_uptime_check_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_uptime_check_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.monitoring_v3.services.uptime_check_service.transports.UptimeCheckServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = UptimeCheckServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def 
test_list_uptime_check_configs( + transport: str = "grpc", request_type=uptime_service.ListUptimeCheckConfigsRequest +): + client = UptimeCheckServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_uptime_check_configs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = uptime_service.ListUptimeCheckConfigsResponse( + next_page_token="next_page_token_value", total_size=1086, + ) + + response = client.list_uptime_check_configs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == uptime_service.ListUptimeCheckConfigsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListUptimeCheckConfigsPager) + + assert response.next_page_token == "next_page_token_value" + + assert response.total_size == 1086 + + +def test_list_uptime_check_configs_from_dict(): + test_list_uptime_check_configs(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_uptime_check_configs_async(transport: str = "grpc_asyncio"): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = uptime_service.ListUptimeCheckConfigsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.list_uptime_check_configs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + uptime_service.ListUptimeCheckConfigsResponse( + next_page_token="next_page_token_value", total_size=1086, + ) + ) + + response = await client.list_uptime_check_configs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListUptimeCheckConfigsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + assert response.total_size == 1086 + + +def test_list_uptime_check_configs_field_headers(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = uptime_service.ListUptimeCheckConfigsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_uptime_check_configs), "__call__" + ) as call: + call.return_value = uptime_service.ListUptimeCheckConfigsResponse() + + client.list_uptime_check_configs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_uptime_check_configs_field_headers_async(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = uptime_service.ListUptimeCheckConfigsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_uptime_check_configs), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + uptime_service.ListUptimeCheckConfigsResponse() + ) + + await client.list_uptime_check_configs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_uptime_check_configs_flattened(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_uptime_check_configs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = uptime_service.ListUptimeCheckConfigsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_uptime_check_configs(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_uptime_check_configs_flattened_error(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_uptime_check_configs( + uptime_service.ListUptimeCheckConfigsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_uptime_check_configs_flattened_async(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_uptime_check_configs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = uptime_service.ListUptimeCheckConfigsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + uptime_service.ListUptimeCheckConfigsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_uptime_check_configs(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_uptime_check_configs_flattened_error_async(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_uptime_check_configs( + uptime_service.ListUptimeCheckConfigsRequest(), parent="parent_value", + ) + + +def test_list_uptime_check_configs_pager(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_uptime_check_configs), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + uptime_service.ListUptimeCheckConfigsResponse( + uptime_check_configs=[ + uptime.UptimeCheckConfig(), + uptime.UptimeCheckConfig(), + uptime.UptimeCheckConfig(), + ], + next_page_token="abc", + ), + uptime_service.ListUptimeCheckConfigsResponse( + uptime_check_configs=[], next_page_token="def", + ), + uptime_service.ListUptimeCheckConfigsResponse( + uptime_check_configs=[uptime.UptimeCheckConfig(),], + next_page_token="ghi", + ), + uptime_service.ListUptimeCheckConfigsResponse( + uptime_check_configs=[ + uptime.UptimeCheckConfig(), + uptime.UptimeCheckConfig(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_uptime_check_configs(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, uptime.UptimeCheckConfig) for i in results) + + +def test_list_uptime_check_configs_pages(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_uptime_check_configs), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + uptime_service.ListUptimeCheckConfigsResponse( + uptime_check_configs=[ + uptime.UptimeCheckConfig(), + uptime.UptimeCheckConfig(), + uptime.UptimeCheckConfig(), + ], + next_page_token="abc", + ), + uptime_service.ListUptimeCheckConfigsResponse( + uptime_check_configs=[], next_page_token="def", + ), + uptime_service.ListUptimeCheckConfigsResponse( + uptime_check_configs=[uptime.UptimeCheckConfig(),], + next_page_token="ghi", + ), + uptime_service.ListUptimeCheckConfigsResponse( + uptime_check_configs=[ + uptime.UptimeCheckConfig(), + uptime.UptimeCheckConfig(), + ], + ), + RuntimeError, + ) + pages = list(client.list_uptime_check_configs(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_uptime_check_configs_async_pager(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_uptime_check_configs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + uptime_service.ListUptimeCheckConfigsResponse( + uptime_check_configs=[ + uptime.UptimeCheckConfig(), + uptime.UptimeCheckConfig(), + uptime.UptimeCheckConfig(), + ], + next_page_token="abc", + ), + uptime_service.ListUptimeCheckConfigsResponse( + uptime_check_configs=[], next_page_token="def", + ), + uptime_service.ListUptimeCheckConfigsResponse( + uptime_check_configs=[uptime.UptimeCheckConfig(),], + next_page_token="ghi", + ), + uptime_service.ListUptimeCheckConfigsResponse( + uptime_check_configs=[ + uptime.UptimeCheckConfig(), + uptime.UptimeCheckConfig(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_uptime_check_configs(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, uptime.UptimeCheckConfig) for i in responses) + + +@pytest.mark.asyncio +async def test_list_uptime_check_configs_async_pages(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_uptime_check_configs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + uptime_service.ListUptimeCheckConfigsResponse( + uptime_check_configs=[ + uptime.UptimeCheckConfig(), + uptime.UptimeCheckConfig(), + uptime.UptimeCheckConfig(), + ], + next_page_token="abc", + ), + uptime_service.ListUptimeCheckConfigsResponse( + uptime_check_configs=[], next_page_token="def", + ), + uptime_service.ListUptimeCheckConfigsResponse( + uptime_check_configs=[uptime.UptimeCheckConfig(),], + next_page_token="ghi", + ), + uptime_service.ListUptimeCheckConfigsResponse( + uptime_check_configs=[ + uptime.UptimeCheckConfig(), + uptime.UptimeCheckConfig(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_uptime_check_configs(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_uptime_check_config( + transport: str = "grpc", request_type=uptime_service.GetUptimeCheckConfigRequest +): + client = UptimeCheckServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_uptime_check_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = uptime.UptimeCheckConfig( + name="name_value", + display_name="display_name_value", + selected_regions=[uptime.UptimeCheckRegion.USA], + is_internal=True, + monitored_resource=monitored_resource.MonitoredResource(type="type_value"), + http_check=uptime.UptimeCheckConfig.HttpCheck( + request_method=uptime.UptimeCheckConfig.HttpCheck.RequestMethod.GET + ), + ) + + response = client.get_uptime_check_config(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == uptime_service.GetUptimeCheckConfigRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, uptime.UptimeCheckConfig) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.selected_regions == [uptime.UptimeCheckRegion.USA] + + assert response.is_internal is True + + +def test_get_uptime_check_config_from_dict(): + test_get_uptime_check_config(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_uptime_check_config_async(transport: str = "grpc_asyncio"): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = uptime_service.GetUptimeCheckConfigRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_uptime_check_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + uptime.UptimeCheckConfig( + name="name_value", + display_name="display_name_value", + selected_regions=[uptime.UptimeCheckRegion.USA], + is_internal=True, + ) + ) + + response = await client.get_uptime_check_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, uptime.UptimeCheckConfig) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.selected_regions == [uptime.UptimeCheckRegion.USA] + + assert response.is_internal is True + + +def test_get_uptime_check_config_field_headers(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = uptime_service.GetUptimeCheckConfigRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_uptime_check_config), "__call__" + ) as call: + call.return_value = uptime.UptimeCheckConfig() + + client.get_uptime_check_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_uptime_check_config_field_headers_async(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = uptime_service.GetUptimeCheckConfigRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_uptime_check_config), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + uptime.UptimeCheckConfig() + ) + + await client.get_uptime_check_config(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_uptime_check_config_flattened(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_uptime_check_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = uptime.UptimeCheckConfig() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_uptime_check_config(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_uptime_check_config_flattened_error(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_uptime_check_config( + uptime_service.GetUptimeCheckConfigRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_uptime_check_config_flattened_async(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_uptime_check_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = uptime.UptimeCheckConfig() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + uptime.UptimeCheckConfig() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_uptime_check_config(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_uptime_check_config_flattened_error_async(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_uptime_check_config( + uptime_service.GetUptimeCheckConfigRequest(), name="name_value", + ) + + +def test_create_uptime_check_config( + transport: str = "grpc", request_type=uptime_service.CreateUptimeCheckConfigRequest +): + client = UptimeCheckServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_uptime_check_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = uptime.UptimeCheckConfig( + name="name_value", + display_name="display_name_value", + selected_regions=[uptime.UptimeCheckRegion.USA], + is_internal=True, + monitored_resource=monitored_resource.MonitoredResource(type="type_value"), + http_check=uptime.UptimeCheckConfig.HttpCheck( + request_method=uptime.UptimeCheckConfig.HttpCheck.RequestMethod.GET + ), + ) + + response = client.create_uptime_check_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == uptime_service.CreateUptimeCheckConfigRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, uptime.UptimeCheckConfig) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.selected_regions == [uptime.UptimeCheckRegion.USA] + + assert response.is_internal is True + + +def test_create_uptime_check_config_from_dict(): + test_create_uptime_check_config(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_uptime_check_config_async(transport: str = "grpc_asyncio"): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = uptime_service.CreateUptimeCheckConfigRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_uptime_check_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + uptime.UptimeCheckConfig( + name="name_value", + display_name="display_name_value", + selected_regions=[uptime.UptimeCheckRegion.USA], + is_internal=True, + ) + ) + + response = await client.create_uptime_check_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, uptime.UptimeCheckConfig) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.selected_regions == [uptime.UptimeCheckRegion.USA] + + assert response.is_internal is True + + +def test_create_uptime_check_config_field_headers(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = uptime_service.CreateUptimeCheckConfigRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_uptime_check_config), "__call__" + ) as call: + call.return_value = uptime.UptimeCheckConfig() + + client.create_uptime_check_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_uptime_check_config_field_headers_async(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = uptime_service.CreateUptimeCheckConfigRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_uptime_check_config), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + uptime.UptimeCheckConfig() + ) + + await client.create_uptime_check_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_uptime_check_config_flattened(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_uptime_check_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = uptime.UptimeCheckConfig() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_uptime_check_config( + parent="parent_value", + uptime_check_config=uptime.UptimeCheckConfig(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].uptime_check_config == uptime.UptimeCheckConfig( + name="name_value" + ) + + +def test_create_uptime_check_config_flattened_error(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_uptime_check_config( + uptime_service.CreateUptimeCheckConfigRequest(), + parent="parent_value", + uptime_check_config=uptime.UptimeCheckConfig(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_uptime_check_config_flattened_async(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_uptime_check_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = uptime.UptimeCheckConfig() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + uptime.UptimeCheckConfig() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_uptime_check_config( + parent="parent_value", + uptime_check_config=uptime.UptimeCheckConfig(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].uptime_check_config == uptime.UptimeCheckConfig( + name="name_value" + ) + + +@pytest.mark.asyncio +async def test_create_uptime_check_config_flattened_error_async(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_uptime_check_config( + uptime_service.CreateUptimeCheckConfigRequest(), + parent="parent_value", + uptime_check_config=uptime.UptimeCheckConfig(name="name_value"), + ) + + +def test_update_uptime_check_config( + transport: str = "grpc", request_type=uptime_service.UpdateUptimeCheckConfigRequest +): + client = UptimeCheckServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_uptime_check_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = uptime.UptimeCheckConfig( + name="name_value", + display_name="display_name_value", + selected_regions=[uptime.UptimeCheckRegion.USA], + is_internal=True, + monitored_resource=monitored_resource.MonitoredResource(type="type_value"), + http_check=uptime.UptimeCheckConfig.HttpCheck( + request_method=uptime.UptimeCheckConfig.HttpCheck.RequestMethod.GET + ), + ) + + response = client.update_uptime_check_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == uptime_service.UpdateUptimeCheckConfigRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, uptime.UptimeCheckConfig) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.selected_regions == [uptime.UptimeCheckRegion.USA] + + assert response.is_internal is True + + +def test_update_uptime_check_config_from_dict(): + test_update_uptime_check_config(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_uptime_check_config_async(transport: str = "grpc_asyncio"): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = uptime_service.UpdateUptimeCheckConfigRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_uptime_check_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + uptime.UptimeCheckConfig( + name="name_value", + display_name="display_name_value", + selected_regions=[uptime.UptimeCheckRegion.USA], + is_internal=True, + ) + ) + + response = await client.update_uptime_check_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, uptime.UptimeCheckConfig) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.selected_regions == [uptime.UptimeCheckRegion.USA] + + assert response.is_internal is True + + +def test_update_uptime_check_config_field_headers(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = uptime_service.UpdateUptimeCheckConfigRequest() + request.uptime_check_config.name = "uptime_check_config.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_uptime_check_config), "__call__" + ) as call: + call.return_value = uptime.UptimeCheckConfig() + + client.update_uptime_check_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "uptime_check_config.name=uptime_check_config.name/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_uptime_check_config_field_headers_async(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = uptime_service.UpdateUptimeCheckConfigRequest() + request.uptime_check_config.name = "uptime_check_config.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.update_uptime_check_config), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + uptime.UptimeCheckConfig() + ) + + await client.update_uptime_check_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "uptime_check_config.name=uptime_check_config.name/value", + ) in kw["metadata"] + + +def test_update_uptime_check_config_flattened(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_uptime_check_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = uptime.UptimeCheckConfig() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_uptime_check_config( + uptime_check_config=uptime.UptimeCheckConfig(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].uptime_check_config == uptime.UptimeCheckConfig( + name="name_value" + ) + + +def test_update_uptime_check_config_flattened_error(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_uptime_check_config( + uptime_service.UpdateUptimeCheckConfigRequest(), + uptime_check_config=uptime.UptimeCheckConfig(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_update_uptime_check_config_flattened_async(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_uptime_check_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = uptime.UptimeCheckConfig() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + uptime.UptimeCheckConfig() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_uptime_check_config( + uptime_check_config=uptime.UptimeCheckConfig(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].uptime_check_config == uptime.UptimeCheckConfig( + name="name_value" + ) + + +@pytest.mark.asyncio +async def test_update_uptime_check_config_flattened_error_async(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_uptime_check_config( + uptime_service.UpdateUptimeCheckConfigRequest(), + uptime_check_config=uptime.UptimeCheckConfig(name="name_value"), + ) + + +def test_delete_uptime_check_config( + transport: str = "grpc", request_type=uptime_service.DeleteUptimeCheckConfigRequest +): + client = UptimeCheckServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_uptime_check_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_uptime_check_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == uptime_service.DeleteUptimeCheckConfigRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_uptime_check_config_from_dict(): + test_delete_uptime_check_config(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_uptime_check_config_async(transport: str = "grpc_asyncio"): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = uptime_service.DeleteUptimeCheckConfigRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.delete_uptime_check_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_uptime_check_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_uptime_check_config_field_headers(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = uptime_service.DeleteUptimeCheckConfigRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_uptime_check_config), "__call__" + ) as call: + call.return_value = None + + client.delete_uptime_check_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_uptime_check_config_field_headers_async(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = uptime_service.DeleteUptimeCheckConfigRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.delete_uptime_check_config), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_uptime_check_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_uptime_check_config_flattened(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_uptime_check_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_uptime_check_config(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_uptime_check_config_flattened_error(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_uptime_check_config( + uptime_service.DeleteUptimeCheckConfigRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_uptime_check_config_flattened_async(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.delete_uptime_check_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_uptime_check_config(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_uptime_check_config_flattened_error_async(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_uptime_check_config( + uptime_service.DeleteUptimeCheckConfigRequest(), name="name_value", + ) + + +def test_list_uptime_check_ips( + transport: str = "grpc", request_type=uptime_service.ListUptimeCheckIpsRequest +): + client = UptimeCheckServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_uptime_check_ips), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = uptime_service.ListUptimeCheckIpsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_uptime_check_ips(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == uptime_service.ListUptimeCheckIpsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListUptimeCheckIpsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_uptime_check_ips_from_dict(): + test_list_uptime_check_ips(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_uptime_check_ips_async(transport: str = "grpc_asyncio"): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = uptime_service.ListUptimeCheckIpsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_uptime_check_ips), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + uptime_service.ListUptimeCheckIpsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_uptime_check_ips(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListUptimeCheckIpsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_uptime_check_ips_pager(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_uptime_check_ips), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + uptime_service.ListUptimeCheckIpsResponse( + uptime_check_ips=[ + uptime.UptimeCheckIp(), + uptime.UptimeCheckIp(), + uptime.UptimeCheckIp(), + ], + next_page_token="abc", + ), + uptime_service.ListUptimeCheckIpsResponse( + uptime_check_ips=[], next_page_token="def", + ), + uptime_service.ListUptimeCheckIpsResponse( + uptime_check_ips=[uptime.UptimeCheckIp(),], next_page_token="ghi", + ), + uptime_service.ListUptimeCheckIpsResponse( + uptime_check_ips=[uptime.UptimeCheckIp(), uptime.UptimeCheckIp(),], + ), + RuntimeError, + ) + + metadata = () + pager = client.list_uptime_check_ips(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, uptime.UptimeCheckIp) for i in results) + + +def test_list_uptime_check_ips_pages(): + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_uptime_check_ips), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + uptime_service.ListUptimeCheckIpsResponse( + uptime_check_ips=[ + uptime.UptimeCheckIp(), + uptime.UptimeCheckIp(), + uptime.UptimeCheckIp(), + ], + next_page_token="abc", + ), + uptime_service.ListUptimeCheckIpsResponse( + uptime_check_ips=[], next_page_token="def", + ), + uptime_service.ListUptimeCheckIpsResponse( + uptime_check_ips=[uptime.UptimeCheckIp(),], next_page_token="ghi", + ), + uptime_service.ListUptimeCheckIpsResponse( + uptime_check_ips=[uptime.UptimeCheckIp(), uptime.UptimeCheckIp(),], + ), + RuntimeError, + ) + pages = list(client.list_uptime_check_ips(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_uptime_check_ips_async_pager(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_uptime_check_ips), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + uptime_service.ListUptimeCheckIpsResponse( + uptime_check_ips=[ + uptime.UptimeCheckIp(), + uptime.UptimeCheckIp(), + uptime.UptimeCheckIp(), + ], + next_page_token="abc", + ), + uptime_service.ListUptimeCheckIpsResponse( + uptime_check_ips=[], next_page_token="def", + ), + uptime_service.ListUptimeCheckIpsResponse( + uptime_check_ips=[uptime.UptimeCheckIp(),], next_page_token="ghi", + ), + uptime_service.ListUptimeCheckIpsResponse( + uptime_check_ips=[uptime.UptimeCheckIp(), uptime.UptimeCheckIp(),], + ), + RuntimeError, + ) + async_pager = await client.list_uptime_check_ips(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, uptime.UptimeCheckIp) for i in responses) + + +@pytest.mark.asyncio +async def test_list_uptime_check_ips_async_pages(): + client = UptimeCheckServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_uptime_check_ips), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + uptime_service.ListUptimeCheckIpsResponse( + uptime_check_ips=[ + uptime.UptimeCheckIp(), + uptime.UptimeCheckIp(), + uptime.UptimeCheckIp(), + ], + next_page_token="abc", + ), + uptime_service.ListUptimeCheckIpsResponse( + uptime_check_ips=[], next_page_token="def", + ), + uptime_service.ListUptimeCheckIpsResponse( + uptime_check_ips=[uptime.UptimeCheckIp(),], next_page_token="ghi", + ), + uptime_service.ListUptimeCheckIpsResponse( + uptime_check_ips=[uptime.UptimeCheckIp(), uptime.UptimeCheckIp(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_uptime_check_ips(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.UptimeCheckServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = UptimeCheckServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.UptimeCheckServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = UptimeCheckServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.UptimeCheckServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = UptimeCheckServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.UptimeCheckServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = UptimeCheckServiceClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.UptimeCheckServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.UptimeCheckServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.UptimeCheckServiceGrpcTransport, + transports.UptimeCheckServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = UptimeCheckServiceClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.UptimeCheckServiceGrpcTransport,) + + +def test_uptime_check_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.UptimeCheckServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_uptime_check_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch( + "google.cloud.monitoring_v3.services.uptime_check_service.transports.UptimeCheckServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.UptimeCheckServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "list_uptime_check_configs", + "get_uptime_check_config", + "create_uptime_check_config", + "update_uptime_check_config", + "delete_uptime_check_config", + "list_uptime_check_ips", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_uptime_check_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.monitoring_v3.services.uptime_check_service.transports.UptimeCheckServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.UptimeCheckServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + quota_project_id="octopus", + ) + + +def test_uptime_check_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.monitoring_v3.services.uptime_check_service.transports.UptimeCheckServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.UptimeCheckServiceTransport() + adc.assert_called_once() + + +def test_uptime_check_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + UptimeCheckServiceClient() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + quota_project_id=None, + ) + + +def test_uptime_check_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.UptimeCheckServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + quota_project_id="octopus", + ) + + +def test_uptime_check_service_host_no_port(): + client = UptimeCheckServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="monitoring.googleapis.com" + ), + ) + assert client._transport._host == "monitoring.googleapis.com:443" + + +def test_uptime_check_service_host_with_port(): + client = UptimeCheckServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="monitoring.googleapis.com:8000" + ), + ) + assert client._transport._host == "monitoring.googleapis.com:8000" + + +def test_uptime_check_service_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.UptimeCheckServiceGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +def test_uptime_check_service_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that channel is used if provided. 
+ transport = transports.UptimeCheckServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.UptimeCheckServiceGrpcTransport, + transports.UptimeCheckServiceGrpcAsyncIOTransport, + ], +) +def test_uptime_check_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.UptimeCheckServiceGrpcTransport, + transports.UptimeCheckServiceGrpcAsyncIOTransport, + ], +) +def test_uptime_check_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() 
+ with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_uptime_check_config_path(): + project = "squid" + uptime_check_config = "clam" + + expected = "projects/{project}/uptimeCheckConfigs/{uptime_check_config}".format( + project=project, uptime_check_config=uptime_check_config, + ) + actual = UptimeCheckServiceClient.uptime_check_config_path( + project, uptime_check_config + ) + assert expected == actual + + +def test_parse_uptime_check_config_path(): + expected = { + "project": "whelk", + "uptime_check_config": "octopus", + } + path = UptimeCheckServiceClient.uptime_check_config_path(**expected) + + # Check that the path construction is reversible. 
+ actual = UptimeCheckServiceClient.parse_uptime_check_config_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "oyster" + + expected = "projects/{project}".format(project=project,) + actual = UptimeCheckServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nudibranch", + } + path = UptimeCheckServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = UptimeCheckServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "cuttlefish" + + expected = "organizations/{organization}".format(organization=organization,) + actual = UptimeCheckServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "mussel", + } + path = UptimeCheckServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = UptimeCheckServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "winkle" + + expected = "folders/{folder}".format(folder=folder,) + actual = UptimeCheckServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = UptimeCheckServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = UptimeCheckServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "scallop" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = UptimeCheckServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "abalone", + } + path = UptimeCheckServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = UptimeCheckServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "squid" + location = "clam" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = UptimeCheckServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = UptimeCheckServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = UptimeCheckServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.UptimeCheckServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = UptimeCheckServiceClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.UptimeCheckServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = UptimeCheckServiceClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/v3/test_alert_policy_service_client_v3.py b/tests/unit/gapic/v3/test_alert_policy_service_client_v3.py deleted file mode 100644 index 8859c88c..00000000 --- a/tests/unit/gapic/v3/test_alert_policy_service_client_v3.py +++ /dev/null @@ -1,265 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import monitoring_v3 -from google.cloud.monitoring_v3.proto import alert_pb2 -from google.cloud.monitoring_v3.proto import alert_service_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestAlertPolicyServiceClient(object): - def test_list_alert_policies(self): - # Setup Expected Response - next_page_token = "" - alert_policies_element = {} - alert_policies = [alert_policies_element] - expected_response = { - "next_page_token": next_page_token, - "alert_policies": alert_policies, - } - expected_response = alert_service_pb2.ListAlertPoliciesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.AlertPolicyServiceClient() - - # Setup Request - name = client.project_path("[PROJECT]") - - paged_list_response = client.list_alert_policies(name) - resources = list(paged_list_response) - assert len(resources) == 1 
- - assert expected_response.alert_policies[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = alert_service_pb2.ListAlertPoliciesRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_alert_policies_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.AlertPolicyServiceClient() - - # Setup request - name = client.project_path("[PROJECT]") - - paged_list_response = client.list_alert_policies(name) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_alert_policy(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - expected_response = {"name": name_2, "display_name": display_name} - expected_response = alert_pb2.AlertPolicy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.AlertPolicyServiceClient() - - # Setup Request - name = client.alert_policy_path("[PROJECT]", "[ALERT_POLICY]") - - response = client.get_alert_policy(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = alert_service_pb2.GetAlertPolicyRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_alert_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.AlertPolicyServiceClient() - - # Setup request - 
name = client.alert_policy_path("[PROJECT]", "[ALERT_POLICY]") - - with pytest.raises(CustomException): - client.get_alert_policy(name) - - def test_create_alert_policy(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - expected_response = {"name": name_2, "display_name": display_name} - expected_response = alert_pb2.AlertPolicy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.AlertPolicyServiceClient() - - # Setup Request - name = client.project_path("[PROJECT]") - alert_policy = {} - - response = client.create_alert_policy(name, alert_policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = alert_service_pb2.CreateAlertPolicyRequest( - name=name, alert_policy=alert_policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_alert_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.AlertPolicyServiceClient() - - # Setup request - name = client.project_path("[PROJECT]") - alert_policy = {} - - with pytest.raises(CustomException): - client.create_alert_policy(name, alert_policy) - - def test_delete_alert_policy(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.AlertPolicyServiceClient() - - # Setup Request - name = client.alert_policy_path("[PROJECT]", "[ALERT_POLICY]") - - client.delete_alert_policy(name) - - assert 
len(channel.requests) == 1 - expected_request = alert_service_pb2.DeleteAlertPolicyRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_alert_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.AlertPolicyServiceClient() - - # Setup request - name = client.alert_policy_path("[PROJECT]", "[ALERT_POLICY]") - - with pytest.raises(CustomException): - client.delete_alert_policy(name) - - def test_update_alert_policy(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - expected_response = {"name": name, "display_name": display_name} - expected_response = alert_pb2.AlertPolicy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.AlertPolicyServiceClient() - - # Setup Request - alert_policy = {} - - response = client.update_alert_policy(alert_policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = alert_service_pb2.UpdateAlertPolicyRequest( - alert_policy=alert_policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_alert_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.AlertPolicyServiceClient() - - # Setup request - alert_policy = {} - - with pytest.raises(CustomException): - 
client.update_alert_policy(alert_policy) diff --git a/tests/unit/gapic/v3/test_group_service_client_v3.py b/tests/unit/gapic/v3/test_group_service_client_v3.py deleted file mode 100644 index 1be96f89..00000000 --- a/tests/unit/gapic/v3/test_group_service_client_v3.py +++ /dev/null @@ -1,334 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.api import monitored_resource_pb2 -from google.cloud import monitoring_v3 -from google.cloud.monitoring_v3.proto import group_pb2 -from google.cloud.monitoring_v3.proto import group_service_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): 
- return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestGroupServiceClient(object): - def test_list_groups(self): - # Setup Expected Response - next_page_token = "" - group_element = {} - group = [group_element] - expected_response = {"next_page_token": next_page_token, "group": group} - expected_response = group_service_pb2.ListGroupsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.GroupServiceClient() - - # Setup Request - name = client.project_path("[PROJECT]") - - paged_list_response = client.list_groups(name) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.group[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = group_service_pb2.ListGroupsRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_groups_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.GroupServiceClient() - - # Setup request - name = client.project_path("[PROJECT]") - - paged_list_response = client.list_groups(name) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_group(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - parent_name = "parentName1015022848" - filter_ = "filter-1274492040" - is_cluster = False - expected_response = { - "name": name_2, - "display_name": display_name, - "parent_name": parent_name, - "filter": filter_, - "is_cluster": is_cluster, - } - expected_response = 
group_pb2.Group(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.GroupServiceClient() - - # Setup Request - name = client.group_path("[PROJECT]", "[GROUP]") - - response = client.get_group(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = group_service_pb2.GetGroupRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_group_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.GroupServiceClient() - - # Setup request - name = client.group_path("[PROJECT]", "[GROUP]") - - with pytest.raises(CustomException): - client.get_group(name) - - def test_create_group(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - parent_name = "parentName1015022848" - filter_ = "filter-1274492040" - is_cluster = False - expected_response = { - "name": name_2, - "display_name": display_name, - "parent_name": parent_name, - "filter": filter_, - "is_cluster": is_cluster, - } - expected_response = group_pb2.Group(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.GroupServiceClient() - - # Setup Request - name = client.project_path("[PROJECT]") - group = {} - - response = client.create_group(name, group) - assert expected_response == response - - assert len(channel.requests) == 1 - 
expected_request = group_service_pb2.CreateGroupRequest(name=name, group=group) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_group_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.GroupServiceClient() - - # Setup request - name = client.project_path("[PROJECT]") - group = {} - - with pytest.raises(CustomException): - client.create_group(name, group) - - def test_update_group(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - parent_name = "parentName1015022848" - filter_ = "filter-1274492040" - is_cluster = False - expected_response = { - "name": name, - "display_name": display_name, - "parent_name": parent_name, - "filter": filter_, - "is_cluster": is_cluster, - } - expected_response = group_pb2.Group(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.GroupServiceClient() - - # Setup Request - group = {} - - response = client.update_group(group) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = group_service_pb2.UpdateGroupRequest(group=group) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_group_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.GroupServiceClient() - - # Setup request - group = {} - - with 
pytest.raises(CustomException): - client.update_group(group) - - def test_delete_group(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.GroupServiceClient() - - # Setup Request - name = client.group_path("[PROJECT]", "[GROUP]") - - client.delete_group(name) - - assert len(channel.requests) == 1 - expected_request = group_service_pb2.DeleteGroupRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_group_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.GroupServiceClient() - - # Setup request - name = client.group_path("[PROJECT]", "[GROUP]") - - with pytest.raises(CustomException): - client.delete_group(name) - - def test_list_group_members(self): - # Setup Expected Response - next_page_token = "" - total_size = 705419236 - members_element = {} - members = [members_element] - expected_response = { - "next_page_token": next_page_token, - "total_size": total_size, - "members": members, - } - expected_response = group_service_pb2.ListGroupMembersResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.GroupServiceClient() - - # Setup Request - name = client.group_path("[PROJECT]", "[GROUP]") - - paged_list_response = client.list_group_members(name) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.members[0] == resources[0] - - assert len(channel.requests) == 1 - 
expected_request = group_service_pb2.ListGroupMembersRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_group_members_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.GroupServiceClient() - - # Setup request - name = client.group_path("[PROJECT]", "[GROUP]") - - paged_list_response = client.list_group_members(name) - with pytest.raises(CustomException): - list(paged_list_response) diff --git a/tests/unit/gapic/v3/test_metric_service_client_v3.py b/tests/unit/gapic/v3/test_metric_service_client_v3.py deleted file mode 100644 index 77cfa537..00000000 --- a/tests/unit/gapic/v3/test_metric_service_client_v3.py +++ /dev/null @@ -1,441 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.api import metric_pb2 as api_metric_pb2 -from google.api import monitored_resource_pb2 -from google.cloud import monitoring_v3 -from google.cloud.monitoring_v3 import enums -from google.cloud.monitoring_v3.proto import common_pb2 -from google.cloud.monitoring_v3.proto import metric_pb2 as proto_metric_pb2 -from google.cloud.monitoring_v3.proto import metric_service_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestMetricServiceClient(object): - def test_list_monitored_resource_descriptors(self): - # Setup Expected Response - next_page_token = "" - resource_descriptors_element = {} - resource_descriptors = [resource_descriptors_element] - expected_response = { - "next_page_token": next_page_token, - "resource_descriptors": resource_descriptors, - } - expected_response = metric_service_pb2.ListMonitoredResourceDescriptorsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: 
- create_channel.return_value = channel - client = monitoring_v3.MetricServiceClient() - - # Setup Request - name = client.project_path("[PROJECT]") - - paged_list_response = client.list_monitored_resource_descriptors(name) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.resource_descriptors[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = metric_service_pb2.ListMonitoredResourceDescriptorsRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_monitored_resource_descriptors_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.MetricServiceClient() - - # Setup request - name = client.project_path("[PROJECT]") - - paged_list_response = client.list_monitored_resource_descriptors(name) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_monitored_resource_descriptor(self): - # Setup Expected Response - name_2 = "name2-1052831874" - type_ = "type3575610" - display_name = "displayName1615086568" - description = "description-1724546052" - expected_response = { - "name": name_2, - "type": type_, - "display_name": display_name, - "description": description, - } - expected_response = monitored_resource_pb2.MonitoredResourceDescriptor( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.MetricServiceClient() - - # Setup Request - name = client.monitored_resource_descriptor_path( - "[PROJECT]", "[MONITORED_RESOURCE_DESCRIPTOR]" - ) - - response = client.get_monitored_resource_descriptor(name) - 
assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = metric_service_pb2.GetMonitoredResourceDescriptorRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_monitored_resource_descriptor_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.MetricServiceClient() - - # Setup request - name = client.monitored_resource_descriptor_path( - "[PROJECT]", "[MONITORED_RESOURCE_DESCRIPTOR]" - ) - - with pytest.raises(CustomException): - client.get_monitored_resource_descriptor(name) - - def test_list_metric_descriptors(self): - # Setup Expected Response - next_page_token = "" - metric_descriptors_element = {} - metric_descriptors = [metric_descriptors_element] - expected_response = { - "next_page_token": next_page_token, - "metric_descriptors": metric_descriptors, - } - expected_response = metric_service_pb2.ListMetricDescriptorsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.MetricServiceClient() - - # Setup Request - name = client.project_path("[PROJECT]") - - paged_list_response = client.list_metric_descriptors(name) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.metric_descriptors[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = metric_service_pb2.ListMetricDescriptorsRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_metric_descriptors_exception(self): - channel = 
ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.MetricServiceClient() - - # Setup request - name = client.project_path("[PROJECT]") - - paged_list_response = client.list_metric_descriptors(name) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_metric_descriptor(self): - # Setup Expected Response - name_2 = "name2-1052831874" - type_ = "type3575610" - unit = "unit3594628" - description = "description-1724546052" - display_name = "displayName1615086568" - expected_response = { - "name": name_2, - "type": type_, - "unit": unit, - "description": description, - "display_name": display_name, - } - expected_response = api_metric_pb2.MetricDescriptor(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.MetricServiceClient() - - # Setup Request - name = client.metric_descriptor_path("[PROJECT]", "[METRIC_DESCRIPTOR]") - - response = client.get_metric_descriptor(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = metric_service_pb2.GetMetricDescriptorRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_metric_descriptor_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.MetricServiceClient() - - # Setup request - name = client.metric_descriptor_path("[PROJECT]", "[METRIC_DESCRIPTOR]") - - with pytest.raises(CustomException): - 
client.get_metric_descriptor(name) - - def test_create_metric_descriptor(self): - # Setup Expected Response - name_2 = "name2-1052831874" - type_ = "type3575610" - unit = "unit3594628" - description = "description-1724546052" - display_name = "displayName1615086568" - expected_response = { - "name": name_2, - "type": type_, - "unit": unit, - "description": description, - "display_name": display_name, - } - expected_response = api_metric_pb2.MetricDescriptor(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.MetricServiceClient() - - # Setup Request - name = client.project_path("[PROJECT]") - metric_descriptor = {} - - response = client.create_metric_descriptor(name, metric_descriptor) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = metric_service_pb2.CreateMetricDescriptorRequest( - name=name, metric_descriptor=metric_descriptor - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_metric_descriptor_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.MetricServiceClient() - - # Setup request - name = client.project_path("[PROJECT]") - metric_descriptor = {} - - with pytest.raises(CustomException): - client.create_metric_descriptor(name, metric_descriptor) - - def test_delete_metric_descriptor(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.MetricServiceClient() - - # Setup Request - name = 
client.metric_descriptor_path("[PROJECT]", "[METRIC_DESCRIPTOR]") - - client.delete_metric_descriptor(name) - - assert len(channel.requests) == 1 - expected_request = metric_service_pb2.DeleteMetricDescriptorRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_metric_descriptor_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.MetricServiceClient() - - # Setup request - name = client.metric_descriptor_path("[PROJECT]", "[METRIC_DESCRIPTOR]") - - with pytest.raises(CustomException): - client.delete_metric_descriptor(name) - - def test_list_time_series(self): - # Setup Expected Response - next_page_token = "" - time_series_element = {} - time_series = [time_series_element] - expected_response = { - "next_page_token": next_page_token, - "time_series": time_series, - } - expected_response = metric_service_pb2.ListTimeSeriesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.MetricServiceClient() - - # Setup Request - name = client.project_path("[PROJECT]") - filter_ = "filter-1274492040" - interval = {} - view = enums.ListTimeSeriesRequest.TimeSeriesView.FULL - - paged_list_response = client.list_time_series(name, filter_, interval, view) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.time_series[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = metric_service_pb2.ListTimeSeriesRequest( - name=name, filter=filter_, interval=interval, view=view - ) - actual_request = 
channel.requests[0][1] - assert expected_request == actual_request - - def test_list_time_series_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.MetricServiceClient() - - # Setup request - name = client.project_path("[PROJECT]") - filter_ = "filter-1274492040" - interval = {} - view = enums.ListTimeSeriesRequest.TimeSeriesView.FULL - - paged_list_response = client.list_time_series(name, filter_, interval, view) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_create_time_series(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.MetricServiceClient() - - # Setup Request - name = client.project_path("[PROJECT]") - time_series = [] - - client.create_time_series(name, time_series) - - assert len(channel.requests) == 1 - expected_request = metric_service_pb2.CreateTimeSeriesRequest( - name=name, time_series=time_series - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_time_series_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.MetricServiceClient() - - # Setup request - name = client.project_path("[PROJECT]") - time_series = [] - - with pytest.raises(CustomException): - client.create_time_series(name, time_series) diff --git a/tests/unit/gapic/v3/test_notification_channel_service_client_v3.py b/tests/unit/gapic/v3/test_notification_channel_service_client_v3.py deleted file mode 100644 index dca35258..00000000 --- 
a/tests/unit/gapic/v3/test_notification_channel_service_client_v3.py +++ /dev/null @@ -1,521 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import monitoring_v3 -from google.cloud.monitoring_v3.proto import notification_pb2 -from google.cloud.monitoring_v3.proto import notification_service_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestNotificationChannelServiceClient(object): - def test_list_notification_channel_descriptors(self): - # Setup Expected Response - 
next_page_token = "" - channel_descriptors_element = {} - channel_descriptors = [channel_descriptors_element] - expected_response = { - "next_page_token": next_page_token, - "channel_descriptors": channel_descriptors, - } - expected_response = notification_service_pb2.ListNotificationChannelDescriptorsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup Request - name = client.project_path("[PROJECT]") - - paged_list_response = client.list_notification_channel_descriptors(name) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.channel_descriptors[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = notification_service_pb2.ListNotificationChannelDescriptorsRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_notification_channel_descriptors_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup request - name = client.project_path("[PROJECT]") - - paged_list_response = client.list_notification_channel_descriptors(name) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_notification_channel_descriptor(self): - # Setup Expected Response - name_2 = "name2-1052831874" - type_ = "type3575610" - display_name = "displayName1615086568" - description = "description-1724546052" - expected_response = { - "name": name_2, - "type": type_, - "display_name": display_name, - "description": description, - 
} - expected_response = notification_pb2.NotificationChannelDescriptor( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup Request - name = client.notification_channel_descriptor_path( - "[PROJECT]", "[CHANNEL_DESCRIPTOR]" - ) - - response = client.get_notification_channel_descriptor(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = notification_service_pb2.GetNotificationChannelDescriptorRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_notification_channel_descriptor_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup request - name = client.notification_channel_descriptor_path( - "[PROJECT]", "[CHANNEL_DESCRIPTOR]" - ) - - with pytest.raises(CustomException): - client.get_notification_channel_descriptor(name) - - def test_list_notification_channels(self): - # Setup Expected Response - next_page_token = "" - notification_channels_element = {} - notification_channels = [notification_channels_element] - expected_response = { - "next_page_token": next_page_token, - "notification_channels": notification_channels, - } - expected_response = notification_service_pb2.ListNotificationChannelsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup Request - name = client.project_path("[PROJECT]") - - paged_list_response = client.list_notification_channels(name) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.notification_channels[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = notification_service_pb2.ListNotificationChannelsRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_notification_channels_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup request - name = client.project_path("[PROJECT]") - - paged_list_response = client.list_notification_channels(name) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_notification_channel(self): - # Setup Expected Response - type_ = "type3575610" - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - description = "description-1724546052" - expected_response = { - "type": type_, - "name": name_2, - "display_name": display_name, - "description": description, - } - expected_response = notification_pb2.NotificationChannel(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup Request - name = client.notification_channel_path("[PROJECT]", "[NOTIFICATION_CHANNEL]") - - response = client.get_notification_channel(name) - assert expected_response == response - - assert 
len(channel.requests) == 1 - expected_request = notification_service_pb2.GetNotificationChannelRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_notification_channel_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup request - name = client.notification_channel_path("[PROJECT]", "[NOTIFICATION_CHANNEL]") - - with pytest.raises(CustomException): - client.get_notification_channel(name) - - def test_create_notification_channel(self): - # Setup Expected Response - type_ = "type3575610" - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - description = "description-1724546052" - expected_response = { - "type": type_, - "name": name_2, - "display_name": display_name, - "description": description, - } - expected_response = notification_pb2.NotificationChannel(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup Request - name = client.project_path("[PROJECT]") - notification_channel = {} - - response = client.create_notification_channel(name, notification_channel) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = notification_service_pb2.CreateNotificationChannelRequest( - name=name, notification_channel=notification_channel - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_notification_channel_exception(self): - # Mock the API response - channel = 
ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup request - name = client.project_path("[PROJECT]") - notification_channel = {} - - with pytest.raises(CustomException): - client.create_notification_channel(name, notification_channel) - - def test_update_notification_channel(self): - # Setup Expected Response - type_ = "type3575610" - name = "name3373707" - display_name = "displayName1615086568" - description = "description-1724546052" - expected_response = { - "type": type_, - "name": name, - "display_name": display_name, - "description": description, - } - expected_response = notification_pb2.NotificationChannel(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup Request - notification_channel = {} - - response = client.update_notification_channel(notification_channel) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = notification_service_pb2.UpdateNotificationChannelRequest( - notification_channel=notification_channel - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_notification_channel_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup request - notification_channel = {} - - with pytest.raises(CustomException): - 
client.update_notification_channel(notification_channel) - - def test_delete_notification_channel(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup Request - name = client.notification_channel_path("[PROJECT]", "[NOTIFICATION_CHANNEL]") - - client.delete_notification_channel(name) - - assert len(channel.requests) == 1 - expected_request = notification_service_pb2.DeleteNotificationChannelRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_notification_channel_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup request - name = client.notification_channel_path("[PROJECT]", "[NOTIFICATION_CHANNEL]") - - with pytest.raises(CustomException): - client.delete_notification_channel(name) - - def test_send_notification_channel_verification_code(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup Request - name = client.notification_channel_path("[PROJECT]", "[NOTIFICATION_CHANNEL]") - - client.send_notification_channel_verification_code(name) - - assert len(channel.requests) == 1 - expected_request = notification_service_pb2.SendNotificationChannelVerificationCodeRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_send_notification_channel_verification_code_exception(self): - # Mock the API 
response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup request - name = client.notification_channel_path("[PROJECT]", "[NOTIFICATION_CHANNEL]") - - with pytest.raises(CustomException): - client.send_notification_channel_verification_code(name) - - def test_get_notification_channel_verification_code(self): - # Setup Expected Response - code = "code3059181" - expected_response = {"code": code} - expected_response = notification_service_pb2.GetNotificationChannelVerificationCodeResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup Request - name = client.notification_channel_path("[PROJECT]", "[NOTIFICATION_CHANNEL]") - - response = client.get_notification_channel_verification_code(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = notification_service_pb2.GetNotificationChannelVerificationCodeRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_notification_channel_verification_code_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup request - name = client.notification_channel_path("[PROJECT]", "[NOTIFICATION_CHANNEL]") - - with pytest.raises(CustomException): - 
client.get_notification_channel_verification_code(name) - - def test_verify_notification_channel(self): - # Setup Expected Response - type_ = "type3575610" - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - description = "description-1724546052" - expected_response = { - "type": type_, - "name": name_2, - "display_name": display_name, - "description": description, - } - expected_response = notification_pb2.NotificationChannel(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup Request - name = client.notification_channel_path("[PROJECT]", "[NOTIFICATION_CHANNEL]") - code = "code3059181" - - response = client.verify_notification_channel(name, code) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = notification_service_pb2.VerifyNotificationChannelRequest( - name=name, code=code - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_verify_notification_channel_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.NotificationChannelServiceClient() - - # Setup request - name = client.notification_channel_path("[PROJECT]", "[NOTIFICATION_CHANNEL]") - code = "code3059181" - - with pytest.raises(CustomException): - client.verify_notification_channel(name, code) diff --git a/tests/unit/gapic/v3/test_service_monitoring_service_client_v3.py b/tests/unit/gapic/v3/test_service_monitoring_service_client_v3.py deleted file mode 100644 index 6899ddd4..00000000 --- 
a/tests/unit/gapic/v3/test_service_monitoring_service_client_v3.py +++ /dev/null @@ -1,481 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import monitoring_v3 -from google.cloud.monitoring_v3.proto import service_pb2 -from google.cloud.monitoring_v3.proto import service_service_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestServiceMonitoringServiceClient(object): - def test_create_service(self): - # Setup Expected Response - name = "name3373707" - display_name = 
"displayName1615086568" - expected_response = {"name": name, "display_name": display_name} - expected_response = service_pb2.Service(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - service = {} - - response = client.create_service(parent, service) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_service_pb2.CreateServiceRequest( - parent=parent, service=service - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_service_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup request - parent = client.project_path("[PROJECT]") - service = {} - - with pytest.raises(CustomException): - client.create_service(parent, service) - - def test_get_service(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - expected_response = {"name": name_2, "display_name": display_name} - expected_response = service_pb2.Service(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup Request - name = client.service_path("[PROJECT]", "[SERVICE]") - - response = client.get_service(name) - assert 
expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_service_pb2.GetServiceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_service_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup request - name = client.service_path("[PROJECT]", "[SERVICE]") - - with pytest.raises(CustomException): - client.get_service(name) - - def test_list_services(self): - # Setup Expected Response - next_page_token = "" - services_element = {} - services = [services_element] - expected_response = {"next_page_token": next_page_token, "services": services} - expected_response = service_service_pb2.ListServicesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - - paged_list_response = client.list_services(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.services[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = service_service_pb2.ListServicesRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_services_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
monitoring_v3.ServiceMonitoringServiceClient() - - # Setup request - parent = client.project_path("[PROJECT]") - - paged_list_response = client.list_services(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_update_service(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - expected_response = {"name": name, "display_name": display_name} - expected_response = service_pb2.Service(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup Request - service = {} - - response = client.update_service(service) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_service_pb2.UpdateServiceRequest(service=service) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_service_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup request - service = {} - - with pytest.raises(CustomException): - client.update_service(service) - - def test_delete_service(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup Request - name = client.service_path("[PROJECT]", "[SERVICE]") - - client.delete_service(name) - - assert len(channel.requests) == 1 - expected_request = 
service_service_pb2.DeleteServiceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_service_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup request - name = client.service_path("[PROJECT]", "[SERVICE]") - - with pytest.raises(CustomException): - client.delete_service(name) - - def test_create_service_level_objective(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - goal = 317825.0 - expected_response = {"name": name, "display_name": display_name, "goal": goal} - expected_response = service_pb2.ServiceLevelObjective(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup Request - parent = client.service_path("[PROJECT]", "[SERVICE]") - service_level_objective = {} - - response = client.create_service_level_objective( - parent, service_level_objective - ) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_service_pb2.CreateServiceLevelObjectiveRequest( - parent=parent, service_level_objective=service_level_objective - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_service_level_objective_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel 
- client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup request - parent = client.service_path("[PROJECT]", "[SERVICE]") - service_level_objective = {} - - with pytest.raises(CustomException): - client.create_service_level_objective(parent, service_level_objective) - - def test_get_service_level_objective(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - goal = 317825.0 - expected_response = {"name": name_2, "display_name": display_name, "goal": goal} - expected_response = service_pb2.ServiceLevelObjective(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup Request - name = client.service_level_objective_path( - "[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]" - ) - - response = client.get_service_level_objective(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_service_pb2.GetServiceLevelObjectiveRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_service_level_objective_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup request - name = client.service_level_objective_path( - "[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]" - ) - - with pytest.raises(CustomException): - client.get_service_level_objective(name) - - def test_list_service_level_objectives(self): - # Setup Expected Response - next_page_token = "" - 
service_level_objectives_element = {} - service_level_objectives = [service_level_objectives_element] - expected_response = { - "next_page_token": next_page_token, - "service_level_objectives": service_level_objectives, - } - expected_response = service_service_pb2.ListServiceLevelObjectivesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup Request - parent = client.service_path("[PROJECT]", "[SERVICE]") - - paged_list_response = client.list_service_level_objectives(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.service_level_objectives[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = service_service_pb2.ListServiceLevelObjectivesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_service_level_objectives_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup request - parent = client.service_path("[PROJECT]", "[SERVICE]") - - paged_list_response = client.list_service_level_objectives(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_update_service_level_objective(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - goal = 317825.0 - expected_response = {"name": name, "display_name": display_name, "goal": goal} - expected_response = service_pb2.ServiceLevelObjective(**expected_response) - - # Mock the API response - 
channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup Request - service_level_objective = {} - - response = client.update_service_level_objective(service_level_objective) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_service_pb2.UpdateServiceLevelObjectiveRequest( - service_level_objective=service_level_objective - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_service_level_objective_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup request - service_level_objective = {} - - with pytest.raises(CustomException): - client.update_service_level_objective(service_level_objective) - - def test_delete_service_level_objective(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup Request - name = client.service_level_objective_path( - "[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]" - ) - - client.delete_service_level_objective(name) - - assert len(channel.requests) == 1 - expected_request = service_service_pb2.DeleteServiceLevelObjectiveRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_service_level_objective_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.ServiceMonitoringServiceClient() - - # Setup request - name = client.service_level_objective_path( - "[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]" - ) - - with pytest.raises(CustomException): - client.delete_service_level_objective(name) diff --git a/tests/unit/gapic/v3/test_uptime_check_service_client_v3.py b/tests/unit/gapic/v3/test_uptime_check_service_client_v3.py deleted file mode 100644 index 6922164f..00000000 --- a/tests/unit/gapic/v3/test_uptime_check_service_client_v3.py +++ /dev/null @@ -1,326 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import monitoring_v3 -from google.cloud.monitoring_v3.proto import uptime_pb2 -from google.cloud.monitoring_v3.proto import uptime_service_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestUptimeCheckServiceClient(object): - def test_list_uptime_check_configs(self): - # Setup Expected Response - next_page_token = "" - total_size = 705419236 - uptime_check_configs_element = {} - uptime_check_configs = [uptime_check_configs_element] - expected_response = { - "next_page_token": next_page_token, - "total_size": total_size, - "uptime_check_configs": uptime_check_configs, - } - expected_response = uptime_service_pb2.ListUptimeCheckConfigsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.UptimeCheckServiceClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - - paged_list_response = 
client.list_uptime_check_configs(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.uptime_check_configs[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = uptime_service_pb2.ListUptimeCheckConfigsRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_uptime_check_configs_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.UptimeCheckServiceClient() - - # Setup request - parent = client.project_path("[PROJECT]") - - paged_list_response = client.list_uptime_check_configs(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_uptime_check_config(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - is_internal = True - expected_response = { - "name": name_2, - "display_name": display_name, - "is_internal": is_internal, - } - expected_response = uptime_pb2.UptimeCheckConfig(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.UptimeCheckServiceClient() - - # Setup Request - name = client.uptime_check_config_path("[PROJECT]", "[UPTIME_CHECK_CONFIG]") - - response = client.get_uptime_check_config(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = uptime_service_pb2.GetUptimeCheckConfigRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_uptime_check_config_exception(self): - # Mock the API response - channel = 
ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.UptimeCheckServiceClient() - - # Setup request - name = client.uptime_check_config_path("[PROJECT]", "[UPTIME_CHECK_CONFIG]") - - with pytest.raises(CustomException): - client.get_uptime_check_config(name) - - def test_create_uptime_check_config(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - is_internal = True - expected_response = { - "name": name, - "display_name": display_name, - "is_internal": is_internal, - } - expected_response = uptime_pb2.UptimeCheckConfig(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.UptimeCheckServiceClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - uptime_check_config = {} - - response = client.create_uptime_check_config(parent, uptime_check_config) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = uptime_service_pb2.CreateUptimeCheckConfigRequest( - parent=parent, uptime_check_config=uptime_check_config - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_uptime_check_config_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.UptimeCheckServiceClient() - - # Setup request - parent = client.project_path("[PROJECT]") - uptime_check_config = {} - - with pytest.raises(CustomException): - client.create_uptime_check_config(parent, 
uptime_check_config) - - def test_update_uptime_check_config(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - is_internal = True - expected_response = { - "name": name, - "display_name": display_name, - "is_internal": is_internal, - } - expected_response = uptime_pb2.UptimeCheckConfig(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.UptimeCheckServiceClient() - - # Setup Request - uptime_check_config = {} - - response = client.update_uptime_check_config(uptime_check_config) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = uptime_service_pb2.UpdateUptimeCheckConfigRequest( - uptime_check_config=uptime_check_config - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_uptime_check_config_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.UptimeCheckServiceClient() - - # Setup request - uptime_check_config = {} - - with pytest.raises(CustomException): - client.update_uptime_check_config(uptime_check_config) - - def test_delete_uptime_check_config(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.UptimeCheckServiceClient() - - # Setup Request - name = client.uptime_check_config_path("[PROJECT]", "[UPTIME_CHECK_CONFIG]") - - client.delete_uptime_check_config(name) - - assert len(channel.requests) == 1 - expected_request = 
uptime_service_pb2.DeleteUptimeCheckConfigRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_uptime_check_config_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.UptimeCheckServiceClient() - - # Setup request - name = client.uptime_check_config_path("[PROJECT]", "[UPTIME_CHECK_CONFIG]") - - with pytest.raises(CustomException): - client.delete_uptime_check_config(name) - - def test_list_uptime_check_ips(self): - # Setup Expected Response - next_page_token = "" - uptime_check_ips_element = {} - uptime_check_ips = [uptime_check_ips_element] - expected_response = { - "next_page_token": next_page_token, - "uptime_check_ips": uptime_check_ips, - } - expected_response = uptime_service_pb2.ListUptimeCheckIpsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.UptimeCheckServiceClient() - - paged_list_response = client.list_uptime_check_ips() - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.uptime_check_ips[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = uptime_service_pb2.ListUptimeCheckIpsRequest() - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_uptime_check_ips_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = monitoring_v3.UptimeCheckServiceClient() - - 
paged_list_response = client.list_uptime_check_ips() - with pytest.raises(CustomException): - list(paged_list_response) diff --git a/tests/unit/test_query.py b/tests/unit/test_query.py index 754a2dc6..23b47193 100644 --- a/tests/unit/test_query.py +++ b/tests/unit/test_query.py @@ -18,6 +18,13 @@ import unittest import mock +from google.cloud import monitoring_v3 as monitoring_v3 +from google.cloud.monitoring_v3 import MetricServiceClient +from google.cloud.monitoring_v3.services.metric_service.transports import ( + MetricServiceGrpcTransport, +) + + PROJECT = "my-project" METRIC_TYPE = "compute.googleapis.com/instance/uptime" @@ -69,12 +76,9 @@ class ChannelStub(object): """Stub for the grpc.Channel interface.""" def __init__(self, responses=[]): - from google.cloud.monitoring_v3.proto import metric_service_pb2 - self.responses = responses self.responses = [ - metric_service_pb2.ListTimeSeriesResponse(**response) - for response in responses + monitoring_v3.ListTimeSeriesResponse(**response) for response in responses ] self.requests = [] @@ -94,21 +98,18 @@ def _make_one(self, *args, **kwargs): @staticmethod def _make_interval(end_time, start_time=None): - from google.cloud.monitoring_v3 import types - - interval = types.TimeInterval() - interval.end_time.FromDatetime(end_time) - if start_time is not None: - interval.start_time.FromDatetime(start_time) + interval = monitoring_v3.TimeInterval(end_time=end_time, start_time=start_time) return interval - def test_constructor_minimal(self): - from google.cloud.monitoring_v3 import MetricServiceClient - - # Mock the API response - channel = ChannelStub() - client = MetricServiceClient(channel=channel) + @staticmethod + def _create_client(channel=None): + if channel is None: + channel = ChannelStub() + transport = MetricServiceGrpcTransport(channel=channel) + return MetricServiceClient(transport=transport) + def test_constructor_minimal(self): + client = self._create_client() query = self._make_one(client, PROJECT) 
self.assertEqual(query._client, client) @@ -125,14 +126,11 @@ def test_constructor_minimal(self): self.assertEqual(query._group_by_fields, ()) def test_constructor_maximal(self): - from google.cloud.monitoring_v3 import MetricServiceClient - T1 = datetime.datetime(2016, 4, 7, 2, 30, 30) DAYS, HOURS, MINUTES = 1, 2, 3 T0 = T1 - datetime.timedelta(days=DAYS, hours=HOURS, minutes=MINUTES) - channel = ChannelStub() - client = MetricServiceClient(channel=channel) + client = self._create_client() query = self._make_one( client, PROJECT, @@ -155,15 +153,12 @@ def test_constructor_maximal(self): self.assertEqual(query._group_by_fields, ()) def test_constructor_default_end_time(self): - from google.cloud.monitoring_v3 import MetricServiceClient - MINUTES = 5 NOW = datetime.datetime(2016, 4, 7, 2, 30, 30) T0 = datetime.datetime(2016, 4, 7, 2, 25, 0) T1 = datetime.datetime(2016, 4, 7, 2, 30, 0) - channel = ChannelStub() - client = MetricServiceClient(channel=channel) + client = self._create_client() with mock.patch("google.cloud.monitoring_v3.query._UTCNOW", new=lambda: NOW): query = self._make_one(client, PROJECT, METRIC_TYPE, minutes=MINUTES) @@ -171,46 +166,31 @@ def test_constructor_default_end_time(self): self.assertEqual(query._end_time, T1) def test_constructor_nonzero_duration_illegal(self): - from google.cloud.monitoring_v3 import MetricServiceClient - T1 = datetime.datetime(2016, 4, 7, 2, 30, 30) - channel = ChannelStub() - client = MetricServiceClient(channel=channel) + client = self._create_client() with self.assertRaises(ValueError): self._make_one(client, PROJECT, METRIC_TYPE, end_time=T1) def test_execution_without_interval_illegal(self): - from google.cloud.monitoring_v3 import MetricServiceClient - - channel = ChannelStub() - client = MetricServiceClient(channel=channel) + client = self._create_client() query = self._make_one(client, PROJECT, METRIC_TYPE) with self.assertRaises(ValueError): list(query) def test_metric_type(self): - from 
google.cloud.monitoring_v3 import MetricServiceClient - - channel = ChannelStub() - client = MetricServiceClient(channel=channel) + client = self._create_client() query = self._make_one(client, PROJECT, METRIC_TYPE) self.assertEqual(query.metric_type, METRIC_TYPE) def test_filter(self): - from google.cloud.monitoring_v3 import MetricServiceClient - - channel = ChannelStub() - client = MetricServiceClient(channel=channel) + client = self._create_client() query = self._make_one(client, PROJECT, METRIC_TYPE) expected = 'metric.type = "{type}"'.format(type=METRIC_TYPE) self.assertEqual(query.filter, expected) def test_filter_by_group(self): - from google.cloud.monitoring_v3 import MetricServiceClient - GROUP = "1234567" - channel = ChannelStub() - client = MetricServiceClient(channel=channel) + client = self._create_client() query = self._make_one(client, PROJECT, METRIC_TYPE) query = query.select_group(GROUP) expected = ('metric.type = "{type}"' ' AND group.id = "{group}"').format( @@ -219,11 +199,8 @@ def test_filter_by_group(self): self.assertEqual(query.filter, expected) def test_filter_by_projects(self): - from google.cloud.monitoring_v3 import MetricServiceClient - PROJECT1, PROJECT2 = "project-1", "project-2" - channel = ChannelStub() - client = MetricServiceClient(channel=channel) + client = self._create_client() query = self._make_one(client, PROJECT, METRIC_TYPE) query = query.select_projects(PROJECT1, PROJECT2) expected = ( @@ -233,11 +210,8 @@ def test_filter_by_projects(self): self.assertEqual(query.filter, expected) def test_filter_by_resources(self): - from google.cloud.monitoring_v3 import MetricServiceClient - ZONE_PREFIX = "europe-" - channel = ChannelStub() - client = MetricServiceClient(channel=channel) + client = self._create_client() query = self._make_one(client, PROJECT, METRIC_TYPE) query = query.select_resources(zone_prefix=ZONE_PREFIX) expected = ( @@ -247,11 +221,8 @@ def test_filter_by_resources(self): self.assertEqual(query.filter, 
expected) def test_filter_by_metrics(self): - from google.cloud.monitoring_v3 import MetricServiceClient - INSTANCE = "my-instance" - channel = ChannelStub() - client = MetricServiceClient(channel=channel) + client = self._create_client() query = self._make_one(client, PROJECT, METRIC_TYPE) query = query.select_metrics(instance_name=INSTANCE) expected = ( @@ -260,29 +231,21 @@ def test_filter_by_metrics(self): self.assertEqual(query.filter, expected) def test_request_parameters_minimal(self): - from google.cloud.monitoring_v3 import MetricServiceClient - from google.cloud.monitoring_v3.gapic import enums - T1 = datetime.datetime(2016, 4, 7, 2, 30, 0) - channel = ChannelStub() - client = MetricServiceClient(channel=channel) + client = self._create_client() query = self._make_one(client, PROJECT, METRIC_TYPE) query = query.select_interval(end_time=T1) actual = query._build_query_params() expected = { "name": u"projects/{}".format(PROJECT), - "filter_": 'metric.type = "{type}"'.format(type=METRIC_TYPE), + "filter": 'metric.type = "{type}"'.format(type=METRIC_TYPE), "interval": self._make_interval(T1), - "view": enums.ListTimeSeriesRequest.TimeSeriesView.FULL, + "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL, } self.assertEqual(actual, expected) def test_request_parameters_maximal(self): - from google.cloud.monitoring_v3 import MetricServiceClient - from google.cloud.monitoring_v3 import types - from google.cloud.monitoring_v3.gapic import enums - T0 = datetime.datetime(2016, 4, 7, 2, 0, 0) T1 = datetime.datetime(2016, 4, 7, 2, 30, 0) @@ -294,8 +257,7 @@ def test_request_parameters_maximal(self): PAGE_SIZE = 100 - channel = ChannelStub() - client = MetricServiceClient(channel=channel) + client = self._create_client() query = self._make_one(client, PROJECT, METRIC_TYPE) query = query.select_interval(start_time=T0, end_time=T1) query = query.align(ALIGNER, minutes=MINUTES, seconds=SECONDS) @@ -303,24 +265,20 @@ def test_request_parameters_maximal(self): 
actual = query._build_query_params(headers_only=True, page_size=PAGE_SIZE) expected = { "name": "projects/%s" % PROJECT, - "filter_": 'metric.type = "{type}"'.format(type=METRIC_TYPE), + "filter": 'metric.type = "{type}"'.format(type=METRIC_TYPE), "interval": self._make_interval(T1, T0), - "aggregation": types.Aggregation( + "aggregation": monitoring_v3.Aggregation( per_series_aligner=ALIGNER, alignment_period={"seconds": PERIOD_IN_SECONDS}, cross_series_reducer=REDUCER, group_by_fields=[FIELD1, FIELD2], ), - "view": enums.ListTimeSeriesRequest.TimeSeriesView.HEADERS, + "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.HEADERS, "page_size": PAGE_SIZE, } self.assertEqual(actual, expected) def test_iteration(self): - from google.cloud.monitoring_v3 import MetricServiceClient - from google.cloud.monitoring_v3.gapic import enums - from google.cloud.monitoring_v3.proto import metric_service_pb2 - T0 = datetime.datetime(2016, 4, 6, 22, 5, 0) T1 = datetime.datetime(2016, 4, 6, 22, 10, 0) @@ -330,31 +288,43 @@ def test_iteration(self): VALUE1 = 60 # seconds VALUE2 = 60.001 # seconds - SERIES1 = { - "metric": {"type": METRIC_TYPE, "labels": METRIC_LABELS}, - "resource": {"type": RESOURCE_TYPE, "labels": RESOURCE_LABELS}, - "metric_kind": METRIC_KIND, - "value_type": VALUE_TYPE, - "points": [ - {"interval": INTERVAL2, "value": {"double_value": VALUE1}}, - {"interval": INTERVAL1, "value": {"double_value": VALUE1}}, - ], - } - SERIES2 = { - "metric": {"type": METRIC_TYPE, "labels": METRIC_LABELS2}, - "resource": {"type": RESOURCE_TYPE, "labels": RESOURCE_LABELS2}, - "metric_kind": METRIC_KIND, - "value_type": VALUE_TYPE, - "points": [ - {"interval": INTERVAL2, "value": {"double_value": VALUE2}}, - {"interval": INTERVAL1, "value": {"double_value": VALUE2}}, - ], - } + # Currently cannot create from a list of dict for repeated fields due to + # https://github.com/googleapis/proto-plus-python/issues/135 + POINT1 = monitoring_v3.Point( + {"interval": INTERVAL2, "value": 
{"double_value": VALUE1}} + ) + POINT2 = monitoring_v3.Point( + {"interval": INTERVAL1, "value": {"double_value": VALUE1}} + ) + POINT3 = monitoring_v3.Point( + {"interval": INTERVAL2, "value": {"double_value": VALUE2}} + ) + POINT4 = monitoring_v3.Point( + {"interval": INTERVAL1, "value": {"double_value": VALUE2}} + ) + SERIES1 = monitoring_v3.TimeSeries( + { + "metric": {"type": METRIC_TYPE, "labels": METRIC_LABELS}, + "resource": {"type": RESOURCE_TYPE, "labels": RESOURCE_LABELS}, + "metric_kind": METRIC_KIND, + "value_type": VALUE_TYPE, + "points": [POINT1, POINT2], + } + ) + SERIES2 = monitoring_v3.TimeSeries( + { + "metric": {"type": METRIC_TYPE, "labels": METRIC_LABELS2}, + "resource": {"type": RESOURCE_TYPE, "labels": RESOURCE_LABELS2}, + "metric_kind": METRIC_KIND, + "value_type": VALUE_TYPE, + "points": [POINT3, POINT4], + } + ) RESPONSE = {"time_series": [SERIES1, SERIES2], "next_page_token": ""} channel = ChannelStub(responses=[RESPONSE]) - client = MetricServiceClient(channel=channel) + client = self._create_client(channel) query = self._make_one(client, PROJECT, METRIC_TYPE) query = query.select_interval(start_time=T0, end_time=T1) response = list(query) @@ -376,45 +346,32 @@ def test_iteration(self): self.assertEqual([p.interval for p in series1.points], [INTERVAL2, INTERVAL1]) self.assertEqual([p.interval for p in series2.points], [INTERVAL2, INTERVAL1]) - expected_request = metric_service_pb2.ListTimeSeriesRequest( + expected_request = monitoring_v3.ListTimeSeriesRequest( name="projects/" + PROJECT, filter='metric.type = "{type}"'.format(type=METRIC_TYPE), interval=self._make_interval(T1, T0), - view=enums.ListTimeSeriesRequest.TimeSeriesView.FULL, + view=monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL, ) request = channel.requests[0][1] self.assertEqual(request, expected_request) def test_iteration_empty(self): - from google.cloud.monitoring_v3 import MetricServiceClient - from google.cloud.monitoring_v3.gapic import enums - from 
google.cloud.monitoring_v3.proto import metric_service_pb2 - T0 = datetime.datetime(2016, 4, 6, 22, 5, 0) T1 = datetime.datetime(2016, 4, 6, 22, 10, 0) - channel = ChannelStub(responses=[{"next_page_token": ""}]) - client = MetricServiceClient(channel=channel) + client = self._create_client() query = self._make_one(client, PROJECT, METRIC_TYPE) - query = query.select_interval(start_time=T0, end_time=T1) - response = list(query) - self.assertEqual(len(response), 0) + with mock.patch.object( + type(client._transport.list_time_series), "__call__" + ) as call: + call.return_value = monitoring_v3.ListTimeSeriesResponse() + query = query.select_interval(start_time=T0, end_time=T1) + response = list(query) - expected_request = metric_service_pb2.ListTimeSeriesRequest( - name="projects/" + PROJECT, - filter='metric.type = "{type}"'.format(type=METRIC_TYPE), - interval=self._make_interval(T1, T0), - view=enums.ListTimeSeriesRequest.TimeSeriesView.FULL, - ) - request = channel.requests[0][1] - self.assertEqual(request, expected_request) + self.assertEqual(len(response), 0) def test_iteration_headers_only(self): - from google.cloud.monitoring_v3 import MetricServiceClient - from google.cloud.monitoring_v3.gapic import enums - from google.cloud.monitoring_v3.proto import metric_service_pb2 - T0 = datetime.datetime(2016, 4, 6, 22, 5, 0) T1 = datetime.datetime(2016, 4, 6, 22, 10, 0) @@ -434,7 +391,7 @@ def test_iteration_headers_only(self): RESPONSE = {"time_series": [SERIES1, SERIES2], "next_page_token": ""} channel = ChannelStub(responses=[RESPONSE]) - client = MetricServiceClient(channel=channel) + client = self._create_client(channel) query = self._make_one(client, PROJECT, METRIC_TYPE) query = query.select_interval(start_time=T0, end_time=T1) response = list(query.iter(headers_only=True)) @@ -450,11 +407,11 @@ def test_iteration_headers_only(self): self.assertFalse(len(series1.points)) self.assertFalse(len(series2.points)) - expected_request = 
metric_service_pb2.ListTimeSeriesRequest( + expected_request = monitoring_v3.ListTimeSeriesRequest( name="projects/" + PROJECT, filter='metric.type = "{type}"'.format(type=METRIC_TYPE), interval=self._make_interval(T1, T0), - view=enums.ListTimeSeriesRequest.TimeSeriesView.HEADERS, + view=monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.HEADERS, ) request = channel.requests[0][1] self.assertEqual(request, expected_request)